From fba9b35696bb9f6d6905a06511dc862e8ac65670 Mon Sep 17 00:00:00 2001 From: Ting-Lan Wang Date: Wed, 29 May 2024 16:17:23 -0400 Subject: [PATCH 01/24] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6629d976..38c48aad 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Oracle will continue to extend `OraOperator` to support additional Oracle Databa * TCPS support customer provided certs * Execute custom scripts during DB setup/startup * Patching for SIDB Primary/Standby in Data Guard -* Long-term backup for Autonomous Databases (ADB): Moves to long-term backup and removes the deprecated mandatory backup +* Long-term backup for Autonomous Databases (ADB): Support for [long-term retention backup](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/backup-long-term.html) and removed support for the deprecated mandatory backup * Wallet expiry date for ADB: A user-friendly enhancement to display the wallet expiry date in the status of the associated ADB * Wait-for-Completion option for ADB: Supports `kubectl wait` command that allows the user to wait for a specific condition on ADB * OKE workload Identify: Supports OKE workload identity authentication method (i.e., uses OKE credentials). 
For more details, refer to [Oracle Autonomous Database (ADB) Prerequisites](docs/adb/ADB_PREREQUISITES.md#authorized-with-oke-workload-identity) From f8e273c5ab097a69ab196b4b6536bca826360c81 Mon Sep 17 00:00:00 2001 From: Ishaan Date: Thu, 4 Jul 2024 12:33:42 +0530 Subject: [PATCH 02/24] Doc and comments enhancement to support openshift --- config/samples/sidb/openshift_rbac.yaml | 72 ++++++++++++------- .../samples/sidb/singleinstancedatabase.yaml | 4 +- docs/sidb/README.md | 33 +++++++-- 3 files changed, 77 insertions(+), 32 deletions(-) diff --git a/config/samples/sidb/openshift_rbac.yaml b/config/samples/sidb/openshift_rbac.yaml index 8c88f78e..2e0d0cb2 100644 --- a/config/samples/sidb/openshift_rbac.yaml +++ b/config/samples/sidb/openshift_rbac.yaml @@ -1,73 +1,93 @@ # -# Copyright (c) 2023, Oracle and/or its affiliates. +# Copyright (c) 2024, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # - --- # Create a Security Context Contraint kind: SecurityContextConstraints - apiVersion: v1 + apiVersion: security.openshift.io/v1 + metadata: + name: sidb-oracle-user-scc + allowPrivilegedContainer: false + allowedCapabilities: + - SYS_NICE + runAsUser: + type: MustRunAs + uid: 54321 + seLinuxContext: + type: RunAsAny + fsGroup: + type: MustRunAs + ranges: + - min: 54321 + max: 54321 + supplementalGroups: + type: MustRunAs + ranges: + - min: 54321 + max: 54321 +--- +# Create a Security Context Contraint + + kind: SecurityContextConstraints + apiVersion: security.openshift.io/v1 metadata: - name: sidb-scc - namespace: default + name: sidb-oracle-root-user-scc allowPrivilegedContainer: false - users: - - system:serviceaccount:default:sidb-sa - - system:serviceaccount:default:oracle-database-operator + allowedCapabilities: + - SYS_NICE runAsUser: type: MustRunAsRange uidRangeMin: 0 - uidRangeMax: 60000 + uidRangeMax: 54321 seLinuxContext: type: RunAsAny fsGroup: type: MustRunAs ranges: - 
min: 0 - max: 60000 + max: 54321 supplementalGroups: type: MustRunAs ranges: - min: 0 - max: 60000 - + max: 54321 --- -# Create Service Account - apiVersion: v1 kind: ServiceAccount metadata: name: sidb-sa - namespace: default + namespace: sidb-ns --- -# Create a rbac role - kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: name: use-sidb-scc - namespace: default + namespace: sidb-ns rules: - - apiGroups: ["security.openshift.io"] - resources: ["securitycontextconstraints"] - resourceNames: ["sidb-scc"] - verbs: ["use"] + - verbs: + - use + apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + resourceNames: + - oracle-user-scc + - oracle-root-scc --- -# Create a rbac role binding - kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: use-sidb-scc - namespace: default + namespace: sidb-ns subjects: - kind: ServiceAccount name: sidb-sa + namespace: sidb-ns roleRef: kind: Role name: use-sidb-scc apiGroup: rbac.authorization.k8s.io - \ No newline at end of file diff --git a/config/samples/sidb/singleinstancedatabase.yaml b/config/samples/sidb/singleinstancedatabase.yaml index 3fcf7d97..746b09d0 100644 --- a/config/samples/sidb/singleinstancedatabase.yaml +++ b/config/samples/sidb/singleinstancedatabase.yaml @@ -64,9 +64,9 @@ spec: tcpsCertRenewInterval: 8760h ## N/A for createAs clone or standby - ## Specify both sgaSize and pgaSize (in MB) or dont specify both ## Specify Non-Zero value to use - ## You cannot change these initParams for Oracle Database Express (XE) edition + ## sgaTarget and pagAggregateTarget must be in MB + ## You cannot change these initParams for Oracle Database Express (XE) and Oracle Database Free edition initParams: cpuCount: 0 processes: 0 diff --git a/docs/sidb/README.md b/docs/sidb/README.md index 81db03b8..895c26a9 100644 --- a/docs/sidb/README.md +++ b/docs/sidb/README.md @@ -5,6 +5,7 @@ Oracle Database Operator for Kubernetes (`OraOperator`) includes the Single Inst * 
[Prerequisites](#prerequisites) * [Mandatory Resource Privileges](#mandatory-resource-privileges) * [Optional Resource Privileges](#optional-resource-privileges) + * [OpenShift Security Context Constraints](#openshift-security-context-constraints) * [SingleInstanceDatabase Resource](#singleinstancedatabase-resource) * [Create a Database](#create-a-database) * [New Database](#new-database) @@ -47,6 +48,7 @@ Oracle Database Operator for Kubernetes (`OraOperator`) includes the Single Inst * [Maintenance Operations](#maintenance-operations) * [Additional Information](#additional-information) + ## Prerequisites Oracle strongly recommends to comply with the [prerequisites](./PREREQUISITES.md) and the following requirements @@ -89,7 +91,30 @@ Oracle strongly recommends to comply with the [prerequisites](./PREREQUISITES.md ```sh kubectl apply -f rbac/persistent-volume-rbac.yaml ``` + + ### OpenShift Security Context Constraints + + OpenShift requires additional Security Context Constraints (SCC) for deploying and managing the SingleInstanceDatabase resource. Follow these steps to create the appropriate SCCs before deploying the SingleInstanceDatabase resource. + + 1. Create a new project/namespace for deploying the SingleInstanceDatabase resource + + ```sh + oc new-project sidb-ns + ``` + + **Note:** OpenShift recommends not to deploy in namespaces starting with `kube`, `openshift` and the `default` namespace. + + 2. Apply the file [openshift_rbac.yaml](../../config/samples/sidb/openshift_rbac.yaml) with cluster-admin user privileges. + + ```sh + oc apply -f openshift-rbac.yaml + ``` + + This would result in creation of SCC (Security Context Constraints) and serviceaccount `sidb-sa` in the namespace `sidb-ns` which has access to the SCC. + + **Note:** The above config yaml file will bind the SCC to the serviceaccount `sidb-sa` in namespace `sidb-ns`. For any other project/namespace update the file appropriately with the namespace before applying. + 3. 
Set the `serviceAccountName` attribute to `sidb-sa` and the namespace to `sidb-ns` in **[config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml)** before deploying the SingleInstanceDatabase resource. ## SingleInstanceDatabase Resource @@ -961,12 +986,12 @@ $ kubectl describe oraclerestdataservice ords-sample ### Template YAML -The template `.yaml` file for Oracle Rest Data Services (`OracleRestDataService` kind), including all the configurable options, is available at **[config/samples/sidb/oraclerestdataservice.yaml](config/samples/sidb/oraclerestdataservice.yaml)**. +The template `.yaml` file for Oracle Rest Data Services (`OracleRestDataService` kind), including all the configurable options, is available at **[config/samples/sidb/oraclerestdataservice.yaml](../../config/samples/sidb/oraclerestdataservice.yaml)**. **Note:** - The `adminPassword` and `ordsPassword` fields in the `oraclerestdataservice.yaml` file contains secrets for authenticating the Single Instance Database and the ORDS user with the following roles: `SQL Administrator, System Administrator, SQL Developer, oracle.dbtools.autorest.any.schema`. - To build the ORDS image, use the following instructions: [Building Oracle REST Data Services Install Images](https://github.com/oracle/docker-images/tree/main/OracleRestDataServices#building-oracle-rest-data-services-install-images). -- By default, ORDS uses self-signed certificates. To use certificates from the Certificate Authority, the ORDS image needs to be rebuilt after specifying the values of `ssl.cert` and `ssl.cert.key` in the [standalone.properties](https://github.com/oracle/docker-images/blob/main/OracleRestDataServices/dockerfiles/standalone.properties.tmpl) file. After you rebuild the ORDS image, use the rebuilt image in the **[config/samples/sidb/oraclerestdataservice.yaml](config/samples/sidb/oraclerestdataservice.yaml)** file. +- By default, ORDS uses self-signed certificates. 
To use certificates from the Certificate Authority, the ORDS image needs to be rebuilt after specifying the values of `ssl.cert` and `ssl.cert.key` in the [standalone.properties](https://github.com/oracle/docker-images/blob/main/OracleRestDataServices/dockerfiles/standalone.properties.tmpl) file. After you rebuild the ORDS image, use the rebuilt image in the **[config/samples/sidb/oraclerestdataservice.yaml](../../config/samples/sidb/oraclerestdataservice.yaml)** file. - If you want to install ORDS in a [prebuilt database](#provision-a-pre-built-database), make sure to attach the **database persistence** by uncommenting the `persistence` section in the **[config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml)** file, while provisioning the prebuilt database. ### REST Enable a Database @@ -1114,7 +1139,7 @@ Fetch all entries from 'DEPT' table by calling the following API Database Actions is a web-based interface that uses Oracle REST Data Services to provide development, data tools, administration and monitoring features for Oracle Database. * To use Database Actions, you must sign in as a database user whose schema has been REST-enabled. -* To enable a schema for REST, you can specify appropriate values for the `.spec.restEnableSchemas` attributes details in the sample `yaml` **[config/samples/sidb/oraclerestdataservice.yaml](config/samples/sidb/oraclerestdataservice.yaml)**, which are needed for authorizing Database Actions. +* To enable a schema for REST, you can specify appropriate values for the `.spec.restEnableSchemas` attributes details in the sample `yaml` **[config/samples/sidb/oraclerestdataservice.yaml](../../config/samples/sidb/oraclerestdataservice.yaml)**, which are needed for authorizing Database Actions. * Schema are created (if they exist) with the username as `.spec.restEnableSchema[].schema` and password as `.spec.ordsPassword.`. 
* UrlMapping `.spec.restEnableSchema[].urlMapping` is optional and is defaulted to `.spec.restEnableSchema[].schema`. @@ -1148,7 +1173,7 @@ Using APEX, developers can quickly develop and deploy compelling apps that solve The `OraOperator` facilitates installation of APEX in the database and also configures ORDS for it. The following section will explain installing APEX with configured ORDS: -* For quick provisioning, use the sample **[config/samples/sidb/oraclerestdataservice_apex.yaml](../../confi/samples/sidb/oraclerestdataservice_apex.yaml)** file. For example: +* For quick provisioning, use the sample **[config/samples/sidb/oraclerestdataservice_apex.yaml](../../config/samples/sidb/oraclerestdataservice_apex.yaml)** file. For example: kubectl apply -f oraclerestdataservice_apex.yaml From 9695ec2bace5f71676a6033511f139ab13b9da87 Mon Sep 17 00:00:00 2001 From: Ishaan Date: Fri, 5 Jul 2024 11:14:22 +0530 Subject: [PATCH 03/24] enhancing formatting --- config/samples/sidb/openshift_rbac.yaml | 136 ++++++++++++------------ 1 file changed, 67 insertions(+), 69 deletions(-) diff --git a/config/samples/sidb/openshift_rbac.yaml b/config/samples/sidb/openshift_rbac.yaml index 2e0d0cb2..f35188b0 100644 --- a/config/samples/sidb/openshift_rbac.yaml +++ b/config/samples/sidb/openshift_rbac.yaml @@ -4,55 +4,53 @@ # --- # Create a Security Context Contraint - - kind: SecurityContextConstraints - apiVersion: security.openshift.io/v1 - metadata: - name: sidb-oracle-user-scc - allowPrivilegedContainer: false - allowedCapabilities: - - SYS_NICE - runAsUser: - type: MustRunAs - uid: 54321 - seLinuxContext: - type: RunAsAny - fsGroup: - type: MustRunAs - ranges: - - min: 54321 - max: 54321 - supplementalGroups: +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: sidb-oracle-user-scc +allowPrivilegedContainer: false +allowedCapabilities: + - SYS_NICE +runAsUser: + type: MustRunAs + uid: 54321 +seLinuxContext: + type: RunAsAny +fsGroup: + type: 
MustRunAs + ranges: + - min: 54321 + max: 54321 +supplementalGroups: type: MustRunAs ranges: - min: 54321 max: 54321 --- # Create a Security Context Contraint - - kind: SecurityContextConstraints - apiVersion: security.openshift.io/v1 - metadata: - name: sidb-oracle-root-user-scc - allowPrivilegedContainer: false - allowedCapabilities: - - SYS_NICE - runAsUser: - type: MustRunAsRange - uidRangeMin: 0 - uidRangeMax: 54321 - seLinuxContext: - type: RunAsAny - fsGroup: - type: MustRunAs - ranges: - - min: 0 - max: 54321 - supplementalGroups: - type: MustRunAs - ranges: - - min: 0 - max: 54321 +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: sidb-oracle-root-user-scc +allowPrivilegedContainer: false +allowedCapabilities: + - SYS_NICE +runAsUser: + type: MustRunAsRange + uidRangeMin: 0 + uidRangeMax: 54321 +seLinuxContext: + type: RunAsAny +fsGroup: + type: MustRunAs + ranges: + - min: 0 + max: 54321 +supplementalGroups: + type: MustRunAs + ranges: + - min: 0 + max: 5432 --- apiVersion: v1 kind: ServiceAccount @@ -61,33 +59,33 @@ metadata: namespace: sidb-ns --- - kind: Role - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: use-sidb-scc - namespace: sidb-ns - rules: - - verbs: - - use - apiGroups: - - security.openshift.io +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: use-sidb-scc + namespace: sidb-ns +rules: + - apiGroups: + - security.openshift.io + verbs: + - use resources: - - securitycontextconstraints + - securitycontextconstraints resourceNames: - - oracle-user-scc - - oracle-root-scc + - sidb-oracle-user-scc + - sidb-oracle-root-scc --- - kind: RoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: use-sidb-scc - namespace: sidb-ns - subjects: - - kind: ServiceAccount - name: sidb-sa - namespace: sidb-ns - roleRef: - kind: Role - name: use-sidb-scc - apiGroup: rbac.authorization.k8s.io +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + 
name: use-sidb-scc + namespace: sidb-ns +subjects: + - kind: ServiceAccount + name: sidb-sa + namespace: sidb-ns +roleRef: + kind: Role + name: use-sidb-scc + apiGroup: rbac.authorization.k8s.io From c8139d9f9cc74ed074dd21b1d106754be2ed1600 Mon Sep 17 00:00:00 2001 From: Ishaan Date: Fri, 5 Jul 2024 11:15:47 +0530 Subject: [PATCH 04/24] enhancing formatting openshift_rbac --- config/samples/sidb/openshift_rbac.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/config/samples/sidb/openshift_rbac.yaml b/config/samples/sidb/openshift_rbac.yaml index f35188b0..021f73a2 100644 --- a/config/samples/sidb/openshift_rbac.yaml +++ b/config/samples/sidb/openshift_rbac.yaml @@ -3,6 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- + # Create a Security Context Contraint kind: SecurityContextConstraints apiVersion: security.openshift.io/v1 @@ -27,6 +28,7 @@ supplementalGroups: - min: 54321 max: 54321 --- + # Create a Security Context Contraint kind: SecurityContextConstraints apiVersion: security.openshift.io/v1 @@ -52,13 +54,14 @@ supplementalGroups: - min: 0 max: 5432 --- + apiVersion: v1 kind: ServiceAccount metadata: name: sidb-sa namespace: sidb-ns - --- + kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -74,8 +77,8 @@ rules: resourceNames: - sidb-oracle-user-scc - sidb-oracle-root-scc - --- + kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: From 3cd5250b839718e3793ceaaee6d8e3e2613fab8d Mon Sep 17 00:00:00 2001 From: Ishaan Date: Sat, 6 Jul 2024 00:16:14 +0530 Subject: [PATCH 05/24] correcting scc ref --- config/samples/sidb/openshift_rbac.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/samples/sidb/openshift_rbac.yaml b/config/samples/sidb/openshift_rbac.yaml index 021f73a2..6dddb80d 100644 --- a/config/samples/sidb/openshift_rbac.yaml +++ b/config/samples/sidb/openshift_rbac.yaml @@ -76,7 +76,7 @@ rules: - 
securitycontextconstraints resourceNames: - sidb-oracle-user-scc - - sidb-oracle-root-scc + - sidb-oracle-root-user-scc --- kind: RoleBinding From 72161cd5596fdb27e902cf10f5f9f241a4d23e20 Mon Sep 17 00:00:00 2001 From: Ishaan Date: Mon, 8 Jul 2024 10:16:19 +0530 Subject: [PATCH 06/24] fixing typo --- config/samples/sidb/singleinstancedatabase.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/samples/sidb/singleinstancedatabase.yaml b/config/samples/sidb/singleinstancedatabase.yaml index 746b09d0..b66082e1 100644 --- a/config/samples/sidb/singleinstancedatabase.yaml +++ b/config/samples/sidb/singleinstancedatabase.yaml @@ -65,7 +65,7 @@ spec: ## N/A for createAs clone or standby ## Specify Non-Zero value to use - ## sgaTarget and pagAggregateTarget must be in MB + ## sgaTarget and pgaAggregateTarget must be in MB ## You cannot change these initParams for Oracle Database Express (XE) and Oracle Database Free edition initParams: cpuCount: 0 From fc51187fc4b9f90821ddb1f5306f68c61f49fba7 Mon Sep 17 00:00:00 2001 From: Saurabh Ahuja Date: Wed, 4 Sep 2024 02:01:35 +0530 Subject: [PATCH 07/24] Squashed commit with all contents from new_stuff branch (#141) * Squashed commit with all contents from new_stuff branch * fixed missed conflict * Oracle's instructions for reporting security vulnerabilities * Installation added --- .gitignore | 4 + PROJECT | 8 + README.md | 3 +- SECURITY.md | 4 +- .../v1alpha1/adbfamily_common_utils.go | 6 +- .../autonomousdatabasebackup_types.go | 12 +- .../v1alpha1/dataguardbroker_types.go | 16 +- apis/database/v1alpha1/groupversion_info.go | 4 +- apis/database/v1alpha1/pdb_types.go | 4 + .../v1alpha1/shardingdatabase_types.go | 105 +- .../v1alpha1/shardingdatabase_webhook.go | 270 ++++ .../v1alpha1/singleinstancedatabase_types.go | 11 + apis/database/v1alpha1/webhook_suite_test.go | 8 +- .../v1alpha1/zz_generated.deepcopy.go | 41 + commons/dbcssystem/dbcs_reconciler.go | 10 +- commons/observability/constants.go | 1 + 
commons/oci/wallet.go | 2 +- commons/sharding/catalog.go | 50 +- commons/sharding/exec.go | 21 + commons/sharding/gsm.go | 2 +- commons/sharding/scommon.go | 219 ++- commons/sharding/shard.go | 21 +- .../crd/bases/database.oracle.com_pdbs.yaml | 5 + ...database.oracle.com_shardingdatabases.yaml | 21 +- ...se.oracle.com_singleinstancedatabases.yaml | 17 + config/database.oracle.com_DbcsSystem.yaml | 240 ++++ ...acle.com_autonomouscontainerdatabases.yaml | 117 ++ ....oracle.com_autonomousdatabasebackups.yaml | 138 ++ ...oracle.com_autonomousdatabaserestores.yaml | 138 ++ ...tabase.oracle.com_autonomousdatabases.yaml | 324 +++++ config/database.oracle.com_cdbs.yaml | 270 ++++ .../database.oracle.com_dataguardbrokers.yaml | 134 ++ ...ase.oracle.com_oraclerestdataservices.yaml | 224 +++ config/database.oracle.com_pdbs.yaml | 383 ++++++ ...database.oracle.com_shardingdatabases.yaml | 688 ++++++++++ ...se.oracle.com_singleinstancedatabases.yaml | 421 ++++++ config/manager/kustomization.yaml | 4 +- ...tabase-operator.clusterserviceversion.yaml | 97 +- ...vability.oracle.com_databaseobservers.yaml | 227 ++++ config/samples/kustomization.yaml | 32 +- .../samples/sidb/singleinstancedatabase.yaml | 15 + config/webhook/manifests.yaml | 41 + controllers/database/pdb_controller.go | 132 +- .../database/shardingdatabase_controller.go | 231 ++-- .../singleinstancedatabase_controller.go | 32 + .../databaseobserver_resource.go | 6 +- docs/multitenant/README.md | 142 +- docs/multitenant/provisioning/add_replica.log | 192 --- docs/multitenant/provisioning/add_replica.md | 36 - .../multitenant/provisioning/add_replica.yaml | 40 - docs/multitenant/provisioning/cdb.log | 279 ---- docs/multitenant/provisioning/cdb.yaml | 41 - .../provisioning/cdb_crd_resource.md | 38 - docs/multitenant/provisioning/cdb_secret.yaml | 17 - docs/multitenant/provisioning/clone_pdb.log | 137 -- docs/multitenant/provisioning/clone_pdb.md | 38 - docs/multitenant/provisioning/clone_pdb.yaml | 20 - 
docs/multitenant/provisioning/create_pdb.log | 139 -- docs/multitenant/provisioning/create_pdb.md | 37 - docs/multitenant/provisioning/create_pdb.yaml | 27 - docs/multitenant/provisioning/delete_pdb.log | 157 --- docs/multitenant/provisioning/delete_pdb.md | 37 - docs/multitenant/provisioning/delete_pdb.yaml | 16 - .../example_setup_using_oci_oke_cluster.md | 3 +- docs/multitenant/provisioning/known_issues.md | 49 - docs/multitenant/provisioning/modify_pdb.log | 181 --- docs/multitenant/provisioning/modify_pdb.md | 69 - .../provisioning/modify_pdb_close.yaml | 18 - .../provisioning/modify_pdb_open.yaml | 18 - .../multinamespace/cdb_create.yaml | 44 + .../multinamespace/pdb_clone.yaml | 50 + .../multinamespace/pdb_close.yaml | 44 + .../multinamespace/pdb_create.yaml | 46 + .../multinamespace/pdb_delete.yaml | 34 + .../provisioning/multinamespace/pdb_open.yaml | 43 + .../provisioning/multinamespace/pdb_plug.yaml | 46 + .../multinamespace/pdb_unplug.yaml | 39 + docs/multitenant/provisioning/ords_image.log | 503 ------- docs/multitenant/provisioning/ords_image.md | 49 +- .../provisioning/pdb_crd_resource.md | 0 docs/multitenant/provisioning/plug_pdb.log | 100 -- docs/multitenant/provisioning/plug_pdb.md | 44 - docs/multitenant/provisioning/plug_pdb.yaml | 22 - .../provisioning/quickOKEcreation.md | 136 ++ .../singlenamespace/cdb_create.yaml | 44 + .../singlenamespace/cdb_secret.yaml | 17 + .../singlenamespace/pdb_clone.yaml | 50 + .../singlenamespace/pdb_close.yaml | 44 + .../singlenamespace/pdb_create.yaml | 47 + .../singlenamespace/pdb_delete.yaml | 34 + .../singlenamespace/pdb_open.yaml | 43 + .../singlenamespace/pdb_plug.yaml | 46 + .../{ => singlenamespace}/pdb_secret.yaml | 11 +- .../pdb_unplug.yaml} | 32 +- docs/multitenant/provisioning/unplug_pdb.log | 165 --- docs/multitenant/provisioning/unplug_pdb.md | 39 - docs/multitenant/provisioning/unplug_pdb.yaml | 17 - .../provisioning/validation_error.md | 73 - docs/multitenant/usecase01/README.md | 150 ++- 
docs/multitenant/usecase01/ca.crt | 25 + docs/multitenant/usecase01/ca.key | 27 + docs/multitenant/usecase01/ca.srl | 1 + docs/multitenant/usecase01/extfile.txt | 1 + .../usecase01/logfiles/BuildImage.log | 1199 +++++++++++------ .../usecase01/logfiles/cdb_creation.log | 357 +++++ .../usecase01/logfiles/openssl_execution.log | 27 +- .../usecase01/logfiles/ordsconfig.log | 58 +- .../usecase01/logfiles/tagandpush.log | 14 + .../usecase01/logfiles/testapi.log | 29 +- docs/multitenant/usecase01/makefile | 29 +- docs/multitenant/usecase01/pdb_create.yaml | 1 + docs/multitenant/usecase01/pdb_delete.yaml | 34 + docs/multitenant/usecase01/pdb_map.yaml | 1 + docs/multitenant/usecase01/server.csr | 18 + docs/multitenant/usecase01/tls.crt | 24 + docs/multitenant/usecase01/tls.key | 28 + docs/multitenant/usecase02/README.md | 10 +- docs/multitenant/usecase02/pdb_clone.yaml | 1 + docs/multitenant/usecase02/pdb_plug.yaml | 1 + docs/multitenant/usecase03/Dockerfile | 63 +- docs/multitenant/usecase03/README.md | 8 +- docs/multitenant/usecase03/cdb_create.yaml | 4 +- docs/multitenant/usecase03/makefile | 18 +- docs/multitenant/usecase03/runOrdsSSL.sh | 190 +++ docs/sharding/README.md | 73 +- docs/sharding/provisioning/debugging.md | 7 + .../sharding_provisioning_with_db_events.md | 40 + .../sharding_provisioning_with_db_events.yaml | 69 + .../sharding_provisioning_with_free_images.md | 40 + ...harding_provisioning_with_free_images.yaml | 58 + ...y_cloning_db_from_gold_image_across_ads.md | 57 + ...ing_by_cloning_db_gold_image_in_same_ad.md | 53 + ...ding_provisioning_with_chunks_specified.md | 43 + ..._provisioning_with_control_on_resources.md | 47 + ...ith_notification_using_oci_notification.md | 87 ++ ...ding_provisioning_without_db_gold_image.md | 40 + ...rding_scale_in_delete_an_existing_shard.md | 50 + .../snr_ssharding_scale_out_add_shards.md | 37 + .../snr_ssharding_shard_prov.yaml | 58 + .../snr_ssharding_shard_prov_chunks.yaml | 61 + .../snr_ssharding_shard_prov_clone.yaml 
| 83 ++ ...ssharding_shard_prov_clone_across_ads.yaml | 91 ++ .../snr_ssharding_shard_prov_delshard.yaml | 69 + .../snr_ssharding_shard_prov_extshard.yaml | 68 + .../snr_ssharding_shard_prov_memory_cpu.yaml | 89 ++ ...sharding_shard_prov_send_notification.yaml | 85 ++ ...y_cloning_db_from_gold_image_across_ads.md | 7 +- ...ing_by_cloning_db_gold_image_in_same_ad.md | 4 +- ...ding_provisioning_with_chunks_specified.md | 40 + ..._provisioning_with_control_on_resources.md | 7 +- ...ith_notification_using_oci_notification.md | 5 +- ...ding_provisioning_without_db_gold_image.md | 9 +- ...rding_scale_in_delete_an_existing_shard.md | 7 +- .../ssharding_scale_out_add_shards.md | 5 +- .../system_sharding/ssharding_shard_prov.yaml | 1 - ...ssharding_shard_prov_clone_across_ads.yaml | 10 +- .../ssharding_shard_prov_delshard.yaml | 2 +- ...y_cloning_db_from_gold_image_across_ads.md | 2 + ...ing_by_cloning_db_gold_image_in_same_ad.md | 2 + ..._provisioning_with_control_on_resources.md | 3 + ...ith_notification_using_oci_notification.md | 3 + ...ding_provisioning_without_db_gold_image.md | 2 +- ...rding_scale_in_delete_an_existing_shard.md | 44 +- .../udsharding_scale_out_add_shards.md | 1 + .../udsharding_shard_prov.yaml | 1 - ...dsharding_shard_prov_clone_across_ads.yaml | 8 + .../udsharding_shard_prov_delshard.yaml | 4 +- .../udsharding_shard_prov_memory_cpu.yaml | 1 - docs/sidb/README.md | 4 + main.go | 3 + oracle-database-operator.yaml | 169 ++- ords/Dockerfile | 61 +- ords/runOrdsSSL.sh | 121 +- 173 files changed, 8888 insertions(+), 3613 deletions(-) create mode 100644 apis/database/v1alpha1/shardingdatabase_webhook.go create mode 100644 config/database.oracle.com_DbcsSystem.yaml create mode 100644 config/database.oracle.com_autonomouscontainerdatabases.yaml create mode 100644 config/database.oracle.com_autonomousdatabasebackups.yaml create mode 100644 config/database.oracle.com_autonomousdatabaserestores.yaml create mode 100644 
config/database.oracle.com_autonomousdatabases.yaml create mode 100644 config/database.oracle.com_cdbs.yaml create mode 100644 config/database.oracle.com_dataguardbrokers.yaml create mode 100644 config/database.oracle.com_oraclerestdataservices.yaml create mode 100644 config/database.oracle.com_pdbs.yaml create mode 100644 config/database.oracle.com_shardingdatabases.yaml create mode 100644 config/database.oracle.com_singleinstancedatabases.yaml create mode 100644 config/observability.oracle.com_databaseobservers.yaml delete mode 100644 docs/multitenant/provisioning/add_replica.log delete mode 100644 docs/multitenant/provisioning/add_replica.md delete mode 100644 docs/multitenant/provisioning/add_replica.yaml delete mode 100644 docs/multitenant/provisioning/cdb.log delete mode 100644 docs/multitenant/provisioning/cdb.yaml delete mode 100644 docs/multitenant/provisioning/cdb_crd_resource.md delete mode 100644 docs/multitenant/provisioning/cdb_secret.yaml delete mode 100644 docs/multitenant/provisioning/clone_pdb.log delete mode 100644 docs/multitenant/provisioning/clone_pdb.md delete mode 100644 docs/multitenant/provisioning/clone_pdb.yaml delete mode 100644 docs/multitenant/provisioning/create_pdb.log delete mode 100644 docs/multitenant/provisioning/create_pdb.md delete mode 100644 docs/multitenant/provisioning/create_pdb.yaml delete mode 100644 docs/multitenant/provisioning/delete_pdb.log delete mode 100644 docs/multitenant/provisioning/delete_pdb.md delete mode 100644 docs/multitenant/provisioning/delete_pdb.yaml delete mode 100644 docs/multitenant/provisioning/known_issues.md delete mode 100644 docs/multitenant/provisioning/modify_pdb.log delete mode 100644 docs/multitenant/provisioning/modify_pdb.md delete mode 100644 docs/multitenant/provisioning/modify_pdb_close.yaml delete mode 100644 docs/multitenant/provisioning/modify_pdb_open.yaml create mode 100644 docs/multitenant/provisioning/multinamespace/cdb_create.yaml create mode 100644 
docs/multitenant/provisioning/multinamespace/pdb_clone.yaml create mode 100644 docs/multitenant/provisioning/multinamespace/pdb_close.yaml create mode 100644 docs/multitenant/provisioning/multinamespace/pdb_create.yaml create mode 100644 docs/multitenant/provisioning/multinamespace/pdb_delete.yaml create mode 100644 docs/multitenant/provisioning/multinamespace/pdb_open.yaml create mode 100644 docs/multitenant/provisioning/multinamespace/pdb_plug.yaml create mode 100644 docs/multitenant/provisioning/multinamespace/pdb_unplug.yaml delete mode 100644 docs/multitenant/provisioning/ords_image.log delete mode 100644 docs/multitenant/provisioning/pdb_crd_resource.md delete mode 100644 docs/multitenant/provisioning/plug_pdb.log delete mode 100644 docs/multitenant/provisioning/plug_pdb.md delete mode 100644 docs/multitenant/provisioning/plug_pdb.yaml create mode 100644 docs/multitenant/provisioning/quickOKEcreation.md create mode 100644 docs/multitenant/provisioning/singlenamespace/cdb_create.yaml create mode 100644 docs/multitenant/provisioning/singlenamespace/cdb_secret.yaml create mode 100644 docs/multitenant/provisioning/singlenamespace/pdb_clone.yaml create mode 100644 docs/multitenant/provisioning/singlenamespace/pdb_close.yaml create mode 100644 docs/multitenant/provisioning/singlenamespace/pdb_create.yaml create mode 100644 docs/multitenant/provisioning/singlenamespace/pdb_delete.yaml create mode 100644 docs/multitenant/provisioning/singlenamespace/pdb_open.yaml create mode 100644 docs/multitenant/provisioning/singlenamespace/pdb_plug.yaml rename docs/multitenant/provisioning/{ => singlenamespace}/pdb_secret.yaml (54%) rename docs/multitenant/provisioning/{pdb.yaml => singlenamespace/pdb_unplug.yaml} (50%) delete mode 100644 docs/multitenant/provisioning/unplug_pdb.log delete mode 100644 docs/multitenant/provisioning/unplug_pdb.md delete mode 100644 docs/multitenant/provisioning/unplug_pdb.yaml delete mode 100644 docs/multitenant/provisioning/validation_error.md 
create mode 100644 docs/multitenant/usecase01/ca.crt create mode 100644 docs/multitenant/usecase01/ca.key create mode 100644 docs/multitenant/usecase01/ca.srl create mode 100644 docs/multitenant/usecase01/extfile.txt create mode 100644 docs/multitenant/usecase01/logfiles/cdb_creation.log create mode 100644 docs/multitenant/usecase01/logfiles/tagandpush.log create mode 100644 docs/multitenant/usecase01/pdb_delete.yaml create mode 100644 docs/multitenant/usecase01/server.csr create mode 100644 docs/multitenant/usecase01/tls.crt create mode 100644 docs/multitenant/usecase01/tls.key create mode 100644 docs/multitenant/usecase03/runOrdsSSL.sh create mode 100644 docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md create mode 100644 docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml create mode 100644 docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md create mode 100644 docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md create mode 100644 
docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml create mode 100644 docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml create mode 100644 docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md diff --git a/.gitignore b/.gitignore index 618e3efb..98fbc1c4 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,7 @@ ords/*zip .gitattributes .vscode .gitlab-ci.yml + +# development +.idea +.local diff --git a/PROJECT b/PROJECT index 7b3ec718..fbf861db 100644 --- a/PROJECT +++ b/PROJECT @@ -1,3 +1,7 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html domain: oracle.com layout: - go.kubebuilder.io/v2 @@ -67,6 +71,10 @@ resources: kind: ShardingDatabase path: github.com/oracle/oracle-database-operator/apis/database/v1alpha1 version: v1alpha1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1beta1 - api: crdVersion: v1 namespaced: true diff --git a/README.md b/README.md index 38c48aad..959045cb 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ In this v1.1.0 production release, `OraOperator` supports the following database Oracle will continue to extend `OraOperator` to support additional Oracle Database configurations. ## New in V1.1.0 Release +* Namespace scope deployment option * Enhanced security with namespace scope deployment option * Support for Oracle Database 23ai Free (with SIDB) * Automatic Storage Expansion for SIDB and Sharded DB @@ -125,7 +126,7 @@ Oracle strongly recommends that you ensure your system meets the following [Prer ```sh kubectl apply -f rbac/node-rbac.yaml ``` - +# Installation ## Install Oracle DB Operator After you have completed the preceding prerequisite changes, you can install the operator. To install the operator in the cluster quickly, you can apply the modified `oracle-database-operator.yaml` file from the preceding step. diff --git a/SECURITY.md b/SECURITY.md index fb238413..30159518 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,4 +1,4 @@ -# Reporting security vulnerabilities +# Oracle's instructions for reporting security vulnerabilities Oracle values the independent security research community and believes that responsible disclosure of security vulnerabilities helps us ensure the security @@ -35,4 +35,4 @@ sufficiently hardened for production use. 
[1]: mailto:secalert_us@oracle.com [2]: https://www.oracle.com/corporate/security-practices/assurance/vulnerability/reporting.html [3]: https://www.oracle.com/security-alerts/encryptionkey.html -[4]: https://www.oracle.com/security-alerts/ +[4]: https://www.oracle.com/security-alerts/ \ No newline at end of file diff --git a/apis/database/v1alpha1/adbfamily_common_utils.go b/apis/database/v1alpha1/adbfamily_common_utils.go index e6691a12..d4d3ae9f 100644 --- a/apis/database/v1alpha1/adbfamily_common_utils.go +++ b/apis/database/v1alpha1/adbfamily_common_utils.go @@ -173,9 +173,9 @@ func traverse(lastSpec interface{}, curSpec interface{}) bool { return changed } -// 1. If the current field is with a zero value, then the field is unchanged. -// 2. If the current field is NOT with a zero value, then we want to comapre it with the last field. -// In this case if the last field is with a zero value, then the field is changed +// 1. If the current field is with a zero value, then the field is unchanged. +// 2. If the current field is NOT with a zero value, then we want to comapre it with the last field. 
+// In this case if the last field is with a zero value, then the field is changed func hasChanged(lastField reflect.Value, curField reflect.Value) bool { zero := reflect.Zero(lastField.Type()).Interface() lastFieldIsZero := reflect.DeepEqual(lastField.Interface(), zero) diff --git a/apis/database/v1alpha1/autonomousdatabasebackup_types.go b/apis/database/v1alpha1/autonomousdatabasebackup_types.go index 876cb811..95c77560 100644 --- a/apis/database/v1alpha1/autonomousdatabasebackup_types.go +++ b/apis/database/v1alpha1/autonomousdatabasebackup_types.go @@ -52,12 +52,12 @@ import ( type AutonomousDatabaseBackupSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - Target TargetSpec `json:"target,omitempty"` - DisplayName *string `json:"displayName,omitempty"` - AutonomousDatabaseBackupOCID *string `json:"autonomousDatabaseBackupOCID,omitempty"` - IsLongTermBackup *bool `json:"isLongTermBackup,omitempty"` - RetentionPeriodInDays *int `json:"retentionPeriodInDays,omitempty"` - OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + Target TargetSpec `json:"target,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + AutonomousDatabaseBackupOCID *string `json:"autonomousDatabaseBackupOCID,omitempty"` + IsLongTermBackup *bool `json:"isLongTermBackup,omitempty"` + RetentionPeriodInDays *int `json:"retentionPeriodInDays,omitempty"` + OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` } // AutonomousDatabaseBackupStatus defines the observed state of AutonomousDatabaseBackup diff --git a/apis/database/v1alpha1/dataguardbroker_types.go b/apis/database/v1alpha1/dataguardbroker_types.go index 138e2bdb..37d71b92 100644 --- a/apis/database/v1alpha1/dataguardbroker_types.go +++ b/apis/database/v1alpha1/dataguardbroker_types.go @@ -50,15 +50,15 @@ type DataguardBrokerSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate 
code after modifying this file - PrimaryDatabaseRef string `json:"primaryDatabaseRef"` - StandbyDatabaseRefs []string `json:"standbyDatabaseRefs"` - SetAsPrimaryDatabase string `json:"setAsPrimaryDatabase,omitempty"` - LoadBalancer bool `json:"loadBalancer,omitempty"` - ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + PrimaryDatabaseRef string `json:"primaryDatabaseRef"` + StandbyDatabaseRefs []string `json:"standbyDatabaseRefs"` + SetAsPrimaryDatabase string `json:"setAsPrimaryDatabase,omitempty"` + LoadBalancer bool `json:"loadBalancer,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` // +kubebuilder:validation:Enum=MaxPerformance;MaxAvailability - ProtectionMode string `json:"protectionMode"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - FastStartFailOver DataguardBrokerFastStartFailOver `json:"fastStartFailOver,omitempty"` + ProtectionMode string `json:"protectionMode"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + FastStartFailOver DataguardBrokerFastStartFailOver `json:"fastStartFailOver,omitempty"` } type DataguardBrokerFastStartFailOver struct { diff --git a/apis/database/v1alpha1/groupversion_info.go b/apis/database/v1alpha1/groupversion_info.go index d72ee908..3c4b1804 100644 --- a/apis/database/v1alpha1/groupversion_info.go +++ b/apis/database/v1alpha1/groupversion_info.go @@ -37,8 +37,8 @@ */ // Package v1alpha1 contains API Schema definitions for the database v1alpha1 API group -//+kubebuilder:object:generate=true -//+groupName=database.oracle.com +// +kubebuilder:object:generate=true +// +groupName=database.oracle.com package v1alpha1 import ( diff --git a/apis/database/v1alpha1/pdb_types.go b/apis/database/v1alpha1/pdb_types.go index 1e4da0a1..8de9db52 100644 --- a/apis/database/v1alpha1/pdb_types.go +++ b/apis/database/v1alpha1/pdb_types.go @@ -114,6 +114,10 @@ type PDBSpec struct { // The target state of the PDB // 
+kubebuilder:validation:Enum=OPEN;CLOSE PDBState string `json:"pdbState,omitempty"` + // turn on the assertive approach to delete pdb resource + // kubectl delete pdb ..... automatically triggers the pluggable database + // deletion + AssertivePdbDeletion bool `json:"assertivePdbDeletion,omitempty"` } // PDBAdminName defines the secret containing Sys Admin User mapped to key 'adminName' for PDB diff --git a/apis/database/v1alpha1/shardingdatabase_types.go b/apis/database/v1alpha1/shardingdatabase_types.go index 3b4f9c17..ffc17ab0 100644 --- a/apis/database/v1alpha1/shardingdatabase_types.go +++ b/apis/database/v1alpha1/shardingdatabase_types.go @@ -58,39 +58,43 @@ import ( type ShardingDatabaseSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file - Shard []ShardSpec `json:"shard"` - Catalog []CatalogSpec `json:"catalog"` // The catalogSpes accept all the catalog parameters - Gsm []GsmSpec `json:"gsm"` // The GsmSpec will accept all the Gsm parameter - StorageClass string `json:"storageClass,omitempty"` // Optional Accept storage class name - DbImage string `json:"dbImage"` // Accept DB Image name - DbImagePullSecret string `json:"dbImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. - GsmImage string `json:"gsmImage"` // Acccept the GSM image name - GsmImagePullSecret string `json:"gsmImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. - StagePvcName string `json:"stagePvcName,omitempty"` // the Stagepvc for the backup of cluster - PortMappings []PortMapping `json:"portMappings,omitempty"` // Port mappings for the service that is created. The service is created if there is at least - Namespace string `json:"namespace,omitempty"` // Target namespace of the application. 
- IsDebug bool `json:"isDebug,omitempty"` // Optional parameter to enable logining - IsExternalSvc bool `json:"isExternalSvc,omitempty"` - IsClone bool `json:"isClone,omitempty"` - IsDataGuard bool `json:"isDataGuard,omitempty"` - ScriptsLocation string `json:"scriptsLocation,omitempty"` - IsDeleteOraPvc bool `json:"isDeleteOraPvc,omitempty"` - ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` - LivenessCheckPeriod int `json:"liveinessCheckPeriod,omitempty"` - ReplicationType string `json:"replicationType,omitempty"` - IsDownloadScripts bool `json:"isDownloadScripts,omitempty"` - InvitedNodeSubnetFlag string `json:"invitedNodeSubnetFlag,omitempty"` - InvitedNodeSubnet string `json:"InvitedNodeSubnet,omitempty"` - ShardingType string `json:"shardingType,omitempty"` - GsmShardSpace []GsmShardSpaceSpec `json:"gsmShardSpace,omitempty"` - GsmShardGroup []GsmShardGroupSpec `json:"gsmShardGroup,omitempty"` - ShardRegion []string `json:"shardRegion,omitempty"` - ShardBuddyRegion string `json:"shardBuddyRegion,omitempty"` - GsmService []GsmServiceSpec `json:"gsmService,omitempty"` - ShardConfigName string `json:"shardConfigName,omitempty"` - GsmDevMode string `json:"gsmDevMode,omitempty"` - DbSecret *SecretDetails `json:"dbSecret,omitempty"` // Secret Name to be used with Shard - IsTdeWallet bool `json:"isTdeWallet,omitempty"` + Shard []ShardSpec `json:"shard"` + Catalog []CatalogSpec `json:"catalog"` // The catalogSpes accept all the catalog parameters + Gsm []GsmSpec `json:"gsm"` // The GsmSpec will accept all the Gsm parameter + StorageClass string `json:"storageClass,omitempty"` // Optional Accept storage class name + DbImage string `json:"dbImage"` // Accept DB Image name + DbImagePullSecret string `json:"dbImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. 
+ GsmImage string `json:"gsmImage"` // Acccept the GSM image name + GsmImagePullSecret string `json:"gsmImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. + StagePvcName string `json:"stagePvcName,omitempty"` // the Stagepvc for the backup of cluster + PortMappings []PortMapping `json:"portMappings,omitempty"` // Port mappings for the service that is created. The service is created if there is at least + Namespace string `json:"namespace,omitempty"` // Target namespace of the application. + IsDebug bool `json:"isDebug,omitempty"` // Optional parameter to enable logining + IsExternalSvc bool `json:"isExternalSvc,omitempty"` + IsClone bool `json:"isClone,omitempty"` + IsDataGuard bool `json:"isDataGuard,omitempty"` + ScriptsLocation string `json:"scriptsLocation,omitempty"` + IsDeleteOraPvc bool `json:"isDeleteOraPvc,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` + LivenessCheckPeriod int `json:"liveinessCheckPeriod,omitempty"` + ReplicationType string `json:"replicationType,omitempty"` + IsDownloadScripts bool `json:"isDownloadScripts,omitempty"` + InvitedNodeSubnetFlag string `json:"invitedNodeSubnetFlag,omitempty"` + InvitedNodeSubnet string `json:"InvitedNodeSubnet,omitempty"` + ShardingType string `json:"shardingType,omitempty"` + GsmShardSpace []GsmShardSpaceSpec `json:"gsmShardSpace,omitempty"` + GsmShardGroup []GsmShardGroupSpec `json:"gsmShardGroup,omitempty"` + ShardRegion []string `json:"shardRegion,omitempty"` + ShardBuddyRegion string `json:"shardBuddyRegion,omitempty"` + GsmService []GsmServiceSpec `json:"gsmService,omitempty"` + ShardConfigName string `json:"shardConfigName,omitempty"` + GsmDevMode string `json:"gsmDevMode,omitempty"` + DbSecret *SecretDetails `json:"dbSecret,omitempty"` // Secret Name to be used with Shard + IsTdeWallet string `json:"isTdeWallet,omitempty"` + TdeWalletPvc string `json:"tdeWalletPvc,omitempty"` + FssStorageClass string 
`json:"fssStorageClass,omitempty"` + TdeWalletPvcMountLocation string `json:"tdeWalletPvcMountLocation,omitempty"` + DbEdition string `json:"dbEdition,omitempty"` } // To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 @@ -164,21 +168,22 @@ type ShardingDatabaseList struct { // ShardSpec is a specification of Shards for an application deployment. // +k8s:openapi-gen=true type ShardSpec struct { - Name string `json:"name"` // Shard name that will be used deploy StatefulSet - StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // Optional Shard Storage Size - EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for Shards - Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` //Optional resource requirement for the container. - PvcName string `json:"pvcName,omitempty"` - Label string `json:"label,omitempty"` - IsDelete bool `json:"isDelete,omitempty"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` - PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` - ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - ShardSpace string `json:"shardSpace,omitempty"` - ShardGroup string `json:"shardGroup,omitempty"` - ShardRegion string `json:"shardRegion,omitempty"` - DeployAs string `json:"deployAs,omitempty"` + Name string `json:"name"` // Shard name that will be used deploy StatefulSet + StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // Optional Shard Storage Size + EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for Shards + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` //Optional resource requirement for the container. 
+ PvcName string `json:"pvcName,omitempty"` + Label string `json:"label,omitempty"` + // +kubebuilder:validation:Enum=enable;disable;failed;force + IsDelete string `json:"isDelete,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` + PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` + ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + ShardSpace string `json:"shardSpace,omitempty"` + ShardGroup string `json:"shardGroup,omitempty"` + ShardRegion string `json:"shardRegion,omitempty"` + DeployAs string `json:"deployAs,omitempty"` } // CatalogSpec defines the desired state of CatalogSpec @@ -190,7 +195,7 @@ type CatalogSpec struct { Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` // Optional resource requirement for the container. PvcName string `json:"pvcName,omitempty"` Label string `json:"label,omitempty"` - IsDelete bool `json:"isDelete,omitempty"` + IsDelete string `json:"isDelete,omitempty"` NodeSelector map[string]string `json:"nodeSelector,omitempty"` PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` @@ -208,7 +213,7 @@ type GsmSpec struct { Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` // Optional resource requirement for the container. 
PvcName string `json:"pvcName,omitempty"` Label string `json:"label,omitempty"` // Optional GSM Label - IsDelete bool `json:"isDelete,omitempty"` + IsDelete string `json:"isDelete,omitempty"` NodeSelector map[string]string `json:"nodeSelector,omitempty"` PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` diff --git a/apis/database/v1alpha1/shardingdatabase_webhook.go b/apis/database/v1alpha1/shardingdatabase_webhook.go new file mode 100644 index 00000000..8b91fb0c --- /dev/null +++ b/apis/database/v1alpha1/shardingdatabase_webhook.go @@ -0,0 +1,270 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var shardingdatabaselog = logf.Log.WithName("shardingdatabase-resource") + +func (r *ShardingDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+ +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-shardingdatabase,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=shardingdatabases,verbs=create;update,versions=v1alpha1,name=mshardingdatabase.kb.io,admissionReviewVersions={v1} + +var _ webhook.Defaulter = &ShardingDatabase{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *ShardingDatabase) Default() { + shardingdatabaselog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. + if r.Spec.GsmDevMode != "" { + r.Spec.GsmDevMode = "dev" + } + + if r.Spec.IsTdeWallet == "" { + r.Spec.IsTdeWallet = "disable" + } + for pindex := range r.Spec.Shard { + if strings.ToLower(r.Spec.Shard[pindex].IsDelete) == "" { + r.Spec.Shard[pindex].IsDelete = "disable" + } + } + +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v1alpha1-shardingdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=shardingdatabases,versions=v1alpha1,name=vshardingdatabase.kb.io,admissionReviewVersions={v1} + +var _ webhook.Validator = &ShardingDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateCreate() (admission.Warnings, error) { + shardingdatabaselog.Info("validate create", "name", r.Name) + + // TODO(user): fill in your validation logic upon object creation. 
+ // Check Secret configuration + var validationErr field.ErrorList + var validationErrs1 field.ErrorList + + //namespaces := db.GetWatchNamespaces() + //_, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + // if len(namespaces) != 0 && !containsNamespace { + // validationErr = append(validationErr, + // field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + // "Oracle database operator doesn't watch over this namespace")) + //} + + if r.Spec.DbSecret == nil { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret"), r.Spec.DbSecret, + "DbSecret cannot be set to nil")) + } else { + if len(r.Spec.DbSecret.Name) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("Name"), r.Spec.DbSecret.Name, + "Secret name cannot be set empty")) + } + if len(r.Spec.DbSecret.PwdFileName) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("PwdFileName"), r.Spec.DbSecret.PwdFileName, + "Password file name cannot be set empty")) + } + if strings.ToLower(r.Spec.DbSecret.EncryptionType) != "base64" { + if strings.ToLower(r.Spec.DbSecret.KeyFileName) == "" { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("KeyFileName"), r.Spec.DbSecret.KeyFileName, + "Key file name cannot be empty")) + } + } + + /** + if len(r.Spec.DbSecret.PwdFileMountLocation) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("PwdFileMountLocation"), r.Spec.DbSecret.PwdFileMountLocation, + "Password file mount location cannot be empty")) + } + + if len(r.Spec.DbSecret.KeyFileMountLocation) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("KeyFileMountLocation"), r.Spec.DbSecret.KeyFileMountLocation, + 
"KeyFileMountLocation file mount location cannot be empty")) + } + **/ + } + + if r.Spec.IsTdeWallet == "enable" { + if (len(r.Spec.FssStorageClass) == 0) && (len(r.Spec.TdeWalletPvc) == 0) { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("FssStorageClass"), r.Spec.FssStorageClass, + "FssStorageClass or TdeWalletPvc cannot be set empty if isTdeWallet set to true")) + + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("TdeWalletPvc"), r.Spec.TdeWalletPvc, + "FssStorageClass or TdeWalletPvc cannot be set empty if isTdeWallet set to true")) + } + } + + if r.Spec.IsTdeWallet != "" { + if (strings.ToLower(strings.TrimSpace(r.Spec.IsTdeWallet)) != "enable") && (strings.ToLower(strings.TrimSpace(r.Spec.IsTdeWallet)) != "disable") { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("isTdeWallet"), r.Spec.IsTdeWallet, + "isTdeWallet can be set to only \"enable\" or \"disable\"")) + } + } + + validationErrs1 = r.validateShardIsDelete() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateFreeEdition() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + // TODO(user): fill in your validation logic upon object creation. + if len(validationErr) == 0 { + return nil, nil + } + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "ShardingDatabase"}, + r.Name, validationErr) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + shardingdatabaselog.Info("validate update", "name", r.Name) + + // TODO(user): fill in your validation logic upon object update. 
+ return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateDelete() (admission.Warnings, error) { + shardingdatabaselog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} + +// ###### Vlaidation Block ################# + +func (r *ShardingDatabase) validateShardIsDelete() field.ErrorList { + + var validationErrs field.ErrorList + + for pindex := range r.Spec.Shard { + if (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "enable") && (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "disable") && (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "failed") { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("shard").Child("isDelete"), r.Spec.Shard[pindex].IsDelete, + "r.Spec.Shard[pindex].IsDelete can be set to only enable|disable|failed")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateFreeEdition() field.ErrorList { + + var validationErrs field.ErrorList + if strings.ToLower(r.Spec.DbEdition) == "free" { + // Shard Spec Checks + for i := 0; i < len(r.Spec.Shard); i++ { + for index, variable := range r.Spec.Shard[i].EnvVars { + if variable.Name == "ORACLE_SID" { + if strings.ToLower(variable.Value) != "free" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("shard").Child("EnvVars"), r.Spec.Shard[i].EnvVars[index].Name, + "r.Spec.Shard[i].EnvVars[index].Name ORACLE_SID value can only be set to free")) + } + } + if variable.Name == "ORACLE_PDB" { + if strings.ToLower(variable.Value) != "freepdb" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("shard").Child("EnvVars"), r.Spec.Shard[i].EnvVars[index].Name, + "r.Spec.Shard[i].EnvVars[index].Name ORACLE_PDB 
value can only be set to freepdb")) + } + } + } + } + // Catalog Spec Checks + for i := 0; i < len(r.Spec.Catalog); i++ { + for index, variable := range r.Spec.Catalog[i].EnvVars { + if variable.Name == "ORACLE_SID" { + if strings.ToLower(variable.Value) != "free" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("catalog").Child("EnvVars"), r.Spec.Catalog[i].EnvVars[index].Name, + "r.Spec.Catalog[i].EnvVars[index].Name ORACLE_SID value can only be set to free")) + } + } + if variable.Name == "ORACLE_PDB" { + if strings.ToLower(variable.Value) != "freepdb" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("catalog").Child("EnvVars"), r.Spec.Catalog[i].EnvVars[index].Name, + "r.Spec.Catalog[i].EnvVars[index].Name ORACLE_PDB value can only be set to freepdb")) + } + } + } + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} diff --git a/apis/database/v1alpha1/singleinstancedatabase_types.go b/apis/database/v1alpha1/singleinstancedatabase_types.go index 0d49aa6b..7c6c1ea5 100644 --- a/apis/database/v1alpha1/singleinstancedatabase_types.go +++ b/apis/database/v1alpha1/singleinstancedatabase_types.go @@ -86,6 +86,17 @@ type SingleInstanceDatabaseSpec struct { Image SingleInstanceDatabaseImage `json:"image"` Persistence SingleInstanceDatabasePersistence `json:"persistence,omitempty"` InitParams *SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` + Resources SingleInstanceDatabaseResources `json:"resources,omitempty"` +} + +type SingleInstanceDatabaseResource struct { + Cpu string `json:"cpu,omitempty"` + Memory string `json:"memory,omitempty"` +} + +type SingleInstanceDatabaseResources struct { + Requests *SingleInstanceDatabaseResource `json:"requests,omitempty"` + Limits *SingleInstanceDatabaseResource `json:"limits,omitempty"` } // SingleInstanceDatabasePersistence defines the storage size and class for PVC diff --git 
a/apis/database/v1alpha1/webhook_suite_test.go b/apis/database/v1alpha1/webhook_suite_test.go index 3f740a41..e28925e6 100644 --- a/apis/database/v1alpha1/webhook_suite_test.go +++ b/apis/database/v1alpha1/webhook_suite_test.go @@ -61,8 +61,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/webhook" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) // To avoid dot import @@ -137,14 +137,14 @@ var _ = BeforeSuite(func() { // start webhook server using Manager webhookInstallOptions := &testEnv.WebhookInstallOptions mgr, err := ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme, + Scheme: scheme, WebhookServer: webhook.NewServer(webhook.Options{ Port: webhookInstallOptions.LocalServingPort, Host: webhookInstallOptions.LocalServingHost, CertDir: webhookInstallOptions.LocalServingCertDir, }), - LeaderElection: false, - Metrics: metricsserver.Options{ + LeaderElection: false, + Metrics: metricsserver.Options{ BindAddress: "0", }, }) diff --git a/apis/database/v1alpha1/zz_generated.deepcopy.go b/apis/database/v1alpha1/zz_generated.deepcopy.go index f1eaf503..10b34ca7 100644 --- a/apis/database/v1alpha1/zz_generated.deepcopy.go +++ b/apis/database/v1alpha1/zz_generated.deepcopy.go @@ -2470,6 +2470,46 @@ func (in *SingleInstanceDatabasePersistence) DeepCopy() *SingleInstanceDatabaseP return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseResource) DeepCopyInto(out *SingleInstanceDatabaseResource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseResource. 
+func (in *SingleInstanceDatabaseResource) DeepCopy() *SingleInstanceDatabaseResource { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseResources) DeepCopyInto(out *SingleInstanceDatabaseResources) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(SingleInstanceDatabaseResource) + **out = **in + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(SingleInstanceDatabaseResource) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseResources. +func (in *SingleInstanceDatabaseResources) DeepCopy() *SingleInstanceDatabaseResources { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SingleInstanceDatabaseSpec) DeepCopyInto(out *SingleInstanceDatabaseSpec) { *out = *in @@ -2510,6 +2550,7 @@ func (in *SingleInstanceDatabaseSpec) DeepCopyInto(out *SingleInstanceDatabaseSp *out = new(SingleInstanceDatabaseInitParams) **out = **in } + in.Resources.DeepCopyInto(&out.Resources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseSpec. 
diff --git a/commons/dbcssystem/dbcs_reconciler.go b/commons/dbcssystem/dbcs_reconciler.go index 70e50294..60905c76 100644 --- a/commons/dbcssystem/dbcs_reconciler.go +++ b/commons/dbcssystem/dbcs_reconciler.go @@ -94,7 +94,7 @@ func CreateAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient d dbcsDetails.CompartmentId = common.String(dbcs.Spec.DbSystem.CompartmentId) dbcsDetails.SubnetId = common.String(dbcs.Spec.DbSystem.SubnetId) dbcsDetails.Shape = common.String(dbcs.Spec.DbSystem.Shape) - dbcsDetails.Domain = common.String(dbcs.Spec.DbSystem.Domain) + dbcsDetails.Domain = common.String(dbcs.Spec.DbSystem.Domain) if dbcs.Spec.DbSystem.DisplayName != "" { dbcsDetails.DisplayName = common.String(dbcs.Spec.DbSystem.DisplayName) } @@ -536,10 +536,10 @@ func GetResourceState(logger logr.Logger, dbClient database.DatabaseClient, Id s func SetDBCSStatus(dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { - if dbcs.Spec.Id == nil { - dbcs.Status.State = "FAILED" - return nil - } + if dbcs.Spec.Id == nil { + dbcs.Status.State = "FAILED" + return nil + } dbcsId := *dbcs.Spec.Id diff --git a/commons/observability/constants.go b/commons/observability/constants.go index a4d85b71..89ecb946 100644 --- a/commons/observability/constants.go +++ b/commons/observability/constants.go @@ -38,6 +38,7 @@ const ( DefaultExporterImage = "container-registry.oracle.com/database/observability-exporter:1.1.0" DefaultServicePort = 9161 + DefaultServiceTargetPort = 9161 DefaultPrometheusPort = "metrics" DefaultReplicaCount = 1 DefaultExporterConfigMountRootPath = "/oracle/observability" diff --git a/commons/oci/wallet.go b/commons/oci/wallet.go index a5f9235e..076460b1 100644 --- a/commons/oci/wallet.go +++ b/commons/oci/wallet.go @@ -107,4 +107,4 @@ func WalletExpiringDate(files map[string][]byte) string { line := data[strings.Index(data, "this wallet will expire 
on"):strings.Index(data, ".\nIn order to avoid")] return strings.TrimSpace(strings.TrimPrefix(line, "this wallet will expire on")) -} \ No newline at end of file +} diff --git a/commons/sharding/catalog.go b/commons/sharding/catalog.go index 8a0019dd..58f07490 100644 --- a/commons/sharding/catalog.go +++ b/commons/sharding/catalog.go @@ -43,9 +43,8 @@ import ( "reflect" "strconv" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/go-logr/logr" + databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -198,6 +197,12 @@ func buildVolumeSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraC result = append(result, corev1.Volume{Name: OraCatalogSpex.Name + "orascript-vol5", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}) } + if checkTdeWalletFlag(instance) { + if len(instance.Spec.FssStorageClass) == 0 && len(instance.Spec.TdeWalletPvc) > 0 { + result = append(result, corev1.Volume{Name: OraCatalogSpex.Name + "shared-storage-vol8", VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: instance.Spec.TdeWalletPvc}}}) + } + } + return result } @@ -210,7 +215,7 @@ func buildContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, O Image: instance.Spec.DbImage, SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_RAW"}, + Add: []corev1.Capability{corev1.Capability("NET_ADMIN"), corev1.Capability("SYS_NICE")}, }, }, Resources: corev1.ResourceRequirements{ @@ -328,6 +333,15 @@ func buildVolumeMountSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "orastage-vol7", MountPath: oraStage}) } + if checkTdeWalletFlag(instance) { + if 
len(instance.Spec.FssStorageClass) > 0 && len(instance.Spec.TdeWalletPvc) == 0 { + result = append(result, corev1.VolumeMount{Name: instance.Name + "shared-storage", MountPath: getTdeWalletMountLoc(instance)}) + } else { + if len(instance.Spec.FssStorageClass) == 0 && len(instance.Spec.TdeWalletPvc) > 0 { + result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "shared-storage-vol8", MountPath: getTdeWalletMountLoc(instance)}) + } + } + } return result } @@ -372,6 +386,34 @@ func volumeClaimTemplatesForCatalog(instance *databasev1alpha1.ShardingDatabase, claims[0].Spec.Selector = &metav1.LabelSelector{MatchLabels: OraCatalogSpex.PvMatchLabels} } + if checkTdeWalletFlag(instance) { + if len(instance.Spec.FssStorageClass) > 0 && len(instance.Spec.TdeWalletPvc) == 0 { + { + pvcClaim := corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name + "shared-storage", + Namespace: instance.Spec.Namespace, + OwnerReferences: getOwnerRef(instance), + Labels: buildLabelsForCatalog(instance, "sharding"), + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + }, + StorageClassName: &instance.Spec.FssStorageClass, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(strconv.FormatInt(int64(OraCatalogSpex.StorageSizeInGb), 10) + "Gi"), + }, + }, + }, + } + + claims = append(claims, pvcClaim) + } + } + } + return claims } @@ -454,7 +496,7 @@ func UpdateProvForCatalog(instance *databasev1alpha1.ShardingDatabase, oraSpexRes := OraCatalogSpex.Resources if !reflect.DeepEqual(shardContaineRes, oraSpexRes) { - isUpdate = true + isUpdate = false } } } diff --git a/commons/sharding/exec.go b/commons/sharding/exec.go index c1921018..44f91e51 100644 --- a/commons/sharding/exec.go +++ b/commons/sharding/exec.go @@ -63,12 +63,33 @@ import ( // ExecCMDInContainer execute command in first container of a pod func 
ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasealphav1.ShardingDatabase, logger logr.Logger) (string, string, error) { + var err1 error = nil var msg string var ( execOut bytes.Buffer execErr bytes.Buffer ) + for i := 0; i < 5; i++ { + if scheme.Scheme == nil { + time.Sleep(time.Second * 40) + } else { + break + } + } + + if kubeClient == nil { + msg = "ExecCommand() : kubeClient is nil" + err1 = fmt.Errorf(msg) + return "Error:","kubeClient is nil",err1 + } + if kubeConfig == nil { + msg = "ExecCommand() : kubeConfig is nil" + err1 = fmt.Errorf(msg) + return "Error:","kubeConfig is nil",err1 + } + + msg = "" req := kubeClient.CoreV1().RESTClient(). Post(). Namespace(instance.Spec.Namespace). diff --git a/commons/sharding/gsm.go b/commons/sharding/gsm.go index 6ac1414f..bcdc8866 100644 --- a/commons/sharding/gsm.go +++ b/commons/sharding/gsm.go @@ -466,7 +466,7 @@ func UpdateProvForGsm(instance *databasev1alpha1.ShardingDatabase, oraSpexRes := OraGsmSpex.Resources if !reflect.DeepEqual(shardContaineRes, oraSpexRes) { - isUpdate = true + isUpdate = false } } } diff --git a/commons/sharding/scommon.go b/commons/sharding/scommon.go index d5d438b1..99987661 100644 --- a/commons/sharding/scommon.go +++ b/commons/sharding/scommon.go @@ -40,6 +40,7 @@ package commons import ( "context" + "encoding/json" "fmt" "slices" @@ -106,6 +107,8 @@ func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da var result []corev1.EnvVar var varinfo string var sidFlag bool = false + //var sidValue string + var pdbValue string var pdbFlag bool = false var sDirectParam bool = false var sGroup1Params bool = false @@ -115,13 +118,17 @@ func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da var oldSidFlag bool = false var archiveLogFlag bool = false var shardSetupFlag bool = false + var dbUnameFlag bool = false + var ofreePdbFlag bool = false for _, variable := 
range variables { if variable.Name == "ORACLE_SID" { sidFlag = true + //sidValue = variable.Value } if variable.Name == "ORACLE_PDB" { pdbFlag = true + pdbValue = variable.Value } if variable.Name == "SHARD_DIRECTOR_PARAMS" { sDirectParam = true @@ -144,9 +151,32 @@ func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da if variable.Name == "OLD_ORACLE_PDB" { archiveLogFlag = true } + if variable.Name == "DB_UNIQUE_NAME" { + dbUnameFlag = true + } + if variable.Name == "ORACLE_FREE_PDB" { + ofreePdbFlag = true + } + result = append(result, corev1.EnvVar{Name: variable.Name, Value: variable.Value}) } + if !dbUnameFlag { + if strings.ToLower(instance.Spec.DbEdition) == "free" { + result = append(result, corev1.EnvVar{Name: "DB_UNIQUE_NAME", Value: strings.ToUpper(name)}) + } + } + + if !ofreePdbFlag { + if strings.ToLower(instance.Spec.DbEdition) == "free" { + if pdbFlag { + result = append(result, corev1.EnvVar{Name: "ORACLE_FREE_PDB", Value: pdbValue}) + } else { + result = append(result, corev1.EnvVar{Name: "ORACLE_FREE_PDB", Value: strings.ToUpper(name) + "PDB"}) + } + } + } + if !shardSetupFlag { if restype == "SHARD" { result = append(result, corev1.EnvVar{Name: "SHARD_SETUP", Value: "true"}) @@ -167,19 +197,27 @@ func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da } } if !sidFlag { - if restype == "SHARD" { - result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: strings.ToUpper(name)}) - } - if restype == "CATALOG" { - result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: strings.ToUpper(name)}) + if strings.ToLower(instance.Spec.DbEdition) == "free" { + result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: "FREE"}) + } else { + if restype == "SHARD" { + result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: strings.ToUpper(name)}) + } + if restype == "CATALOG" { + result = append(result, corev1.EnvVar{Name: "ORACLE_SID", Value: strings.ToUpper(name)}) + } } } if 
!pdbFlag { - if restype == "SHARD" { - result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: strings.ToUpper(name) + "PDB"}) - } - if restype == "CATALOG" { - result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: strings.ToUpper(name) + "PDB"}) + if strings.ToLower(instance.Spec.DbEdition) == "free" { + result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: "FREEPDB"}) + } else { + if restype == "SHARD" { + result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: strings.ToUpper(name) + "PDB"}) + } + if restype == "CATALOG" { + result = append(result, corev1.EnvVar{Name: "ORACLE_PDB", Value: strings.ToUpper(name) + "PDB"}) + } } } // Secret Settings @@ -256,6 +294,10 @@ func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da result = append(result, corev1.EnvVar{Name: "DEV_MODE", Value: "TRUE"}) } + if instance.Spec.InvitedNodeSubnetFlag == "" { + instance.Spec.InvitedNodeSubnetFlag = "FALSE" + + } if strings.ToUpper(instance.Spec.InvitedNodeSubnetFlag) != "FALSE" { result = append(result, corev1.EnvVar{Name: "INVITED_NODE_SUBNET_FLAG", Value: "TRUE"}) if instance.Spec.InvitedNodeSubnet != "" { @@ -727,13 +769,26 @@ func buildCatalogParams(instance *databasealphav1.ShardingDatabase) string { } for _, variable := range variables { - if variable.Name == "ORACLE_SID" { + if variable.Name == "DB_UNIQUE_NAME" { sidFlag = true sidName = variable.Value + } else { + if variable.Name == "ORACLE_SID" { + sidFlag = true + sidName = variable.Value + } } - if variable.Name == "ORACLE_PDB" { - pdbFlag = true - pdbName = variable.Value + if variable.Name == "ORACLE_FREE_PDB" { + if strings.ToLower(instance.Spec.DbEdition) == "free" { + pdbFlag = true + pdbName = variable.Value + } + } + if strings.ToLower(instance.Spec.DbEdition) != "free" { + if variable.Name == "ORACLE_PDB" { + pdbFlag = true + pdbName = variable.Value + } } if variable.Name == "CATALOG_PORT" { portFlag = true @@ -754,16 +809,26 @@ func 
buildCatalogParams(instance *databasealphav1.ShardingDatabase) string { varinfo = "catalog_db=" + strings.ToUpper(instance.Spec.Catalog[0].Name) + ";" result = result + varinfo } else { - varinfo = "catalog_db=" + strings.ToUpper(sidName) + ";" - result = result + varinfo + if strings.ToLower(instance.Spec.DbEdition) == "free" { + varinfo = "catalog_db=" + strings.ToUpper(instance.Spec.Catalog[0].Name) + ";" + result = result + varinfo + } else { + varinfo = "catalog_db=" + strings.ToUpper(sidName) + ";" + result = result + varinfo + } } if !pdbFlag { varinfo = "catalog_pdb=" + strings.ToUpper(instance.Spec.Catalog[0].Name) + "PDB" + ";" result = result + varinfo } else { - varinfo = "catalog_pdb=" + strings.ToUpper(pdbName) + ";" - result = result + varinfo + if strings.ToLower(instance.Spec.DbEdition) == "free" { + varinfo = "catalog_pdb=" + strings.ToUpper(instance.Spec.Catalog[0].Name) + "PDB" + ";" + result = result + varinfo + } else { + varinfo = "catalog_pdb=" + strings.ToUpper(pdbName) + ";" + result = result + varinfo + } } if !portFlag { @@ -781,8 +846,13 @@ func buildCatalogParams(instance *databasealphav1.ShardingDatabase) string { varinfo = "catalog_name=" + strings.ToUpper(cname) + ";" result = result + varinfo } + if chunksFlag { result = result + "catalog_chunks=" + catchunks + ";" + } else { + if strings.ToLower(instance.Spec.DbEdition) == "free" && strings.ToUpper(instance.Spec.ShardingType) != "USER" && strings.ToUpper(instance.Spec.ShardingType) != "NATIVE" { + result = result + "catalog_chunks=12;" + } } result = strings.TrimSuffix(result, ";") return result @@ -840,6 +910,15 @@ func BuildShardParams(instance *databasealphav1.ShardingDatabase, sfSet *appsv1. 
var result string var varinfo string var isShardPort bool = false + var freePdbFlag bool = false + var freePdbValue string + var pdbFlag bool = false + var pdbValue string + var dbUnameFlag bool = false + var sidFlag bool = false + var dbUname string + var sidName string + //var isShardGrp bool = false //var i int32 //var isShardSpace bool = false @@ -847,14 +926,25 @@ func BuildShardParams(instance *databasealphav1.ShardingDatabase, sfSet *appsv1. result = "shard_host=" + sfSet.Name + "-0" + "." + sfSet.Name + ";" for _, variable := range variables { - if variable.Name == "ORACLE_SID" { - varinfo = "shard_db=" + variable.Value + ";" - result = result + varinfo + if variable.Name == "DB_UNIQUE_NAME" { + dbUnameFlag = true + dbUname = variable.Value + } else { + if variable.Name == "ORACLE_SID" { + sidFlag = true + sidName = variable.Value + } } + if variable.Name == "ORACLE_FREE_PDB" { + freePdbFlag = true + freePdbValue = variable.Value + } + if variable.Name == "ORACLE_PDB" { - varinfo = "shard_pdb=" + variable.Value + ";" - result = result + varinfo + pdbFlag = true + pdbValue = variable.Value } + if variable.Name == "SHARD_PORT" { varinfo = "shard_port=" + variable.Value + ";" result = result + varinfo @@ -862,6 +952,41 @@ func BuildShardParams(instance *databasealphav1.ShardingDatabase, sfSet *appsv1. 
} } + + if dbUnameFlag { + varinfo = "shard_db=" + dbUname + ";" + result = result + varinfo + } + + if sidFlag && !dbUnameFlag { + if strings.ToLower(instance.Spec.DbEdition) != "free" { + varinfo = "shard_db=" + sidName + ";" + result = result + varinfo + } else { + varinfo = "shard_db=" + sfSet.Name + ";" + result = result + varinfo + } + } + + if !sidFlag && !dbUnameFlag { + if strings.ToLower(instance.Spec.DbEdition) != "free" { + varinfo = "shard_db=" + sfSet.Name + ";" + result = result + varinfo + } + } + + if freePdbFlag { + if strings.ToLower(instance.Spec.DbEdition) == "free" { + varinfo = "shard_pdb=" + freePdbValue + ";" + result = result + varinfo + } + } else { + if pdbFlag { + varinfo = "shard_pdb=" + pdbValue + ";" + result = result + varinfo + } + } + if OraShardSpex.ShardGroup != "" { varinfo = "shard_group=" + OraShardSpex.ShardGroup + ";" result = result + varinfo @@ -1205,7 +1330,7 @@ func CheckOnlineShardInGsm(gsmPodName string, sparams string, instance *database _, _, err := ExecCommand(gsmPodName, getOnlineShardCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { - msg := "Shard: " + GetFmtStr(sparams) + " is not onine in GSM." + msg := "Shard: " + GetFmtStr(sparams) + " is not online in GSM." 
LogMessages("INFO", msg, nil, instance, logger) return err } @@ -1355,6 +1480,28 @@ func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, insta return nil } +func InstanceShardPatch(obj client.Object, instance *databasealphav1.ShardingDatabase, kClient client.Client, id int32, field string, value string, +) error { + + var err error + instSpec := instance.Spec + instSpec.Shard[id].IsDelete = "failed" + instshardM, _ := json.Marshal(struct { + Spec *databasealphav1.ShardingDatabaseSpec `json:"spec":` + }{ + Spec: &instSpec, + }) + + patch1 := client.RawPatch(types.MergePatchType, instshardM) + err = kClient.Patch(context.TODO(), obj, patch1) + + if err != nil { + return err + } + + return err +} + // Send Notification func SendNotification(title string, body string, instance *databasealphav1.ShardingDatabase, topicId string, rclient ons.NotificationDataPlaneClient, logger logr.Logger, @@ -1377,3 +1524,27 @@ func SendNotification(title string, body string, instance *databasealphav1.Shard func GetSecretMount() string { return oraSecretMount } + +func checkTdeWalletFlag(instance *databasev1alpha1.ShardingDatabase) bool { + if strings.ToLower(instance.Spec.IsTdeWallet) == "enable" { + return true + } + return false +} + +func CheckIsDeleteFlag(delStr string, instance *databasealphav1.ShardingDatabase, logger logr.Logger) bool { + if strings.ToLower(delStr) == "enable" { + return true + } + if strings.ToLower(delStr) == "failed" { + // LogMessages("INFO", "manual intervention required", nil, instance, logger) + } + return false +} + +func getTdeWalletMountLoc(instance *databasev1alpha1.ShardingDatabase) string { + if len(instance.Spec.TdeWalletPvcMountLocation) > 0 { + return instance.Spec.TdeWalletPvcMountLocation + } + return "/tdewallet/" + instance.Name +} diff --git a/commons/sharding/shard.go b/commons/sharding/shard.go index 6dd7a6ea..c76fc0e5 100644 --- a/commons/sharding/shard.go +++ b/commons/sharding/shard.go @@ -197,6 +197,13 @@ func 
buildVolumeSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraSha if instance.Spec.IsDownloadScripts { result = append(result, corev1.Volume{Name: OraShardSpex.Name + "orascript-vol5", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}) } + + if checkTdeWalletFlag(instance) { + if len(instance.Spec.FssStorageClass) == 0 && len(instance.Spec.TdeWalletPvc) > 0 { + result = append(result, corev1.Volume{Name: OraShardSpex.Name + "shared-storage-vol8", VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: instance.Spec.TdeWalletPvc}}}) + } + } + return result } @@ -209,7 +216,7 @@ func buildContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, Ora Image: instance.Spec.DbImage, SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_RAW"}, + Add: []corev1.Capability{corev1.Capability("NET_ADMIN"), corev1.Capability("SYS_NICE")}, }, }, Resources: corev1.ResourceRequirements{ @@ -331,6 +338,16 @@ func buildVolumeMountSpecForShard(instance *databasev1alpha1.ShardingDatabase, O result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "orastage-vol7", MountPath: oraStage}) } + if checkTdeWalletFlag(instance) { + if len(instance.Spec.FssStorageClass) > 0 && len(instance.Spec.TdeWalletPvc) == 0 { + result = append(result, corev1.VolumeMount{Name: instance.Name + "shared-storage" + instance.Spec.Catalog[0].Name + "-0", MountPath: getTdeWalletMountLoc(instance)}) + } else { + if len(instance.Spec.FssStorageClass) == 0 && len(instance.Spec.TdeWalletPvc) > 0 { + result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "shared-storage-vol8", MountPath: getTdeWalletMountLoc(instance)}) + } + } + } + return result } @@ -463,7 +480,7 @@ func UpdateProvForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpe oraSpexRes := OraShardSpex.Resources if !reflect.DeepEqual(shardContaineRes, oraSpexRes) { 
- isUpdate = true + isUpdate = false } } } diff --git a/config/crd/bases/database.oracle.com_pdbs.yaml b/config/crd/bases/database.oracle.com_pdbs.yaml index 8e6cb94f..85af8c1b 100644 --- a/config/crd/bases/database.oracle.com_pdbs.yaml +++ b/config/crd/bases/database.oracle.com_pdbs.yaml @@ -119,6 +119,11 @@ spec: to plug in a PDB. This property is applicable when the Action property is PLUG but not required. type: boolean + assertivePdbDeletion: + description: turn on the assertive approach to delete pdb resource + kubectl delete pdb ..... automatically triggers the pluggable database + deletion + type: boolean cdbName: description: Name of the CDB type: string diff --git a/config/crd/bases/database.oracle.com_shardingdatabases.yaml b/config/crd/bases/database.oracle.com_shardingdatabases.yaml index 554ed506..641629a0 100644 --- a/config/crd/bases/database.oracle.com_shardingdatabases.yaml +++ b/config/crd/bases/database.oracle.com_shardingdatabases.yaml @@ -72,7 +72,7 @@ spec: a container image type: string isDelete: - type: boolean + type: string label: type: string name: @@ -148,6 +148,8 @@ spec: - name type: object type: array + dbEdition: + type: string dbImage: type: string dbImagePullSecret: @@ -177,6 +179,8 @@ spec: - name - pwdFileName type: object + fssStorageClass: + type: string gsm: items: description: GsmSpec defines the desired state of GsmSpec @@ -205,7 +209,7 @@ spec: a container image type: string isDelete: - type: boolean + type: string label: type: string name: @@ -395,7 +399,7 @@ spec: isExternalSvc: type: boolean isTdeWallet: - type: boolean + type: string liveinessCheckPeriod: type: integer namespace: @@ -454,7 +458,12 @@ spec: a container image type: string isDelete: - type: boolean + enum: + - enable + - disable + - failed + - force + type: string label: type: string name: @@ -550,6 +559,10 @@ spec: type: string storageClass: type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string required: - catalog - 
dbImage diff --git a/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml b/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml index acfa2bea..1c011e17 100644 --- a/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml +++ b/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml @@ -175,6 +175,23 @@ spec: type: integer replicas: type: integer + resources: + properties: + limits: + properties: + cpu: + type: string + memory: + type: string + type: object + requests: + properties: + cpu: + type: string + memory: + type: string + type: object + type: object serviceAccountName: type: string serviceAnnotations: diff --git a/config/database.oracle.com_DbcsSystem.yaml b/config/database.oracle.com_DbcsSystem.yaml new file mode 100644 index 00000000..e933d5a4 --- /dev/null +++ b/config/database.oracle.com_DbcsSystem.yaml @@ -0,0 +1,240 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: DbcsSystem.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DbcsSystem + listKind: DbcsSystemList + plural: DbcsSystem + singular: dbcssystem + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: DbcsSystem is the Schema for the dbcssystems API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DbcsSystemSpec defines the desired state of DbcsSystem + properties: + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: integer + dbAdminPaswordSecret: + type: string + dbBackupConfig: + description: DB Backup COnfig Network Struct + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: + type: string + diskRedundancy: + type: string + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPaswordSecret + - hostName + - shape + - sshPublicKeys + - subnetId + type: object + hardLink: + type: boolean + id: + type: string + ociConfigMap: + type: string + ociSecret: + type: string + required: + - ociConfigMap + type: object + status: + description: DbcsSystemStatus defines the observed state of DbcsSystem + properties: + availabilityDomain: + type: string + 
cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbEdition: + type: string + dbInfo: + items: + description: DbcsSystemStatus defines the observed state of DbcsSystem + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: + type: string + id: + type: string + licenseModel: + type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_autonomouscontainerdatabases.yaml b/config/database.oracle.com_autonomouscontainerdatabases.yaml new file mode 100644 index 00000000..bac3a28c --- /dev/null +++ b/config/database.oracle.com_autonomouscontainerdatabases.yaml @@ -0,0 +1,117 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: autonomouscontainerdatabases.database.oracle.com +spec: + group: database.oracle.com + names: 
+ kind: AutonomousContainerDatabase + listKind: AutonomousContainerDatabaseList + plural: autonomouscontainerdatabases + shortNames: + - acd + - acds + singular: autonomouscontainerdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: DisplayName + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutonomousContainerDatabaseSpec defines the desired state + of AutonomousContainerDatabase + properties: + action: + enum: + - SYNC + - RESTART + - TERMINATE + type: string + autonomousContainerDatabaseOCID: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + type: string + autonomousExadataVMClusterOCID: + type: string + compartmentOCID: + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + hardLink: + default: false + type: boolean + ociConfig: + description: "*********************** *\tOCI config ***********************" + properties: + configMapName: + type: string + secretName: + type: string + type: object + patchModel: + description: 'AutonomousContainerDatabasePatchModelEnum Enum with + underlying type: string' + enum: + - RELEASE_UPDATES + - RELEASE_UPDATE_REVISIONS + type: string + type: object + status: + description: AutonomousContainerDatabaseStatus defines the observed state + of AutonomousContainerDatabase + properties: + lifecycleState: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + timeCreated: + type: string + required: + - lifecycleState + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_autonomousdatabasebackups.yaml b/config/database.oracle.com_autonomousdatabasebackups.yaml new file mode 100644 index 00000000..a5c37507 --- /dev/null +++ b/config/database.oracle.com_autonomousdatabasebackups.yaml @@ -0,0 +1,138 @@ + +--- +apiVersion: 
apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: autonomousdatabasebackups.database.oracle.com +spec: + group: database.oracle.com + names: + kind: AutonomousDatabaseBackup + listKind: AutonomousDatabaseBackupList + plural: autonomousdatabasebackups + shortNames: + - adbbu + - adbbus + singular: autonomousdatabasebackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.dbDisplayName + name: DB DisplayName + type: string + - jsonPath: .status.type + name: Type + type: string + - jsonPath: .status.timeStarted + name: Started + type: string + - jsonPath: .status.timeEnded + name: Ended + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutonomousDatabaseBackup is the Schema for the autonomousdatabasebackups + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutonomousDatabaseBackupSpec defines the desired state of + AutonomousDatabaseBackup + properties: + autonomousDatabaseBackupOCID: + type: string + displayName: + type: string + isLongTermBackup: + type: boolean + ociConfig: + description: "*********************** *\tOCI config ***********************" + properties: + configMapName: + type: string + secretName: + type: string + type: object + retentionPeriodInDays: + type: integer + target: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + k8sADB: + description: "*********************** *\tADB spec ***********************" + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + type: object + status: + description: AutonomousDatabaseBackupStatus defines the observed state + of AutonomousDatabaseBackup + properties: + autonomousDatabaseOCID: + type: string + compartmentOCID: + type: string + dbDisplayName: + type: string + dbName: + type: string + isAutomatic: + type: boolean + lifecycleState: + description: 'AutonomousDatabaseBackupLifecycleStateEnum Enum with + underlying type: string' + type: string + timeEnded: + type: string + timeStarted: + type: string + type: + description: 'AutonomousDatabaseBackupTypeEnum Enum with underlying + type: string' + type: string + required: + - autonomousDatabaseOCID + - compartmentOCID + - dbDisplayName + - dbName + - isAutomatic + - lifecycleState + - type + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_autonomousdatabaserestores.yaml 
b/config/database.oracle.com_autonomousdatabaserestores.yaml new file mode 100644 index 00000000..5e9f2c73 --- /dev/null +++ b/config/database.oracle.com_autonomousdatabaserestores.yaml @@ -0,0 +1,138 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: autonomousdatabaserestores.database.oracle.com +spec: + group: database.oracle.com + names: + kind: AutonomousDatabaseRestore + listKind: AutonomousDatabaseRestoreList + plural: autonomousdatabaserestores + shortNames: + - adbr + - adbrs + singular: autonomousdatabaserestore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.displayName + name: DbDisplayName + type: string + - jsonPath: .status.dbName + name: DbName + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutonomousDatabaseRestore is the Schema for the autonomousdatabaserestores + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AutonomousDatabaseRestoreSpec defines the desired state of + AutonomousDatabaseRestore + properties: + ociConfig: + description: "*********************** *\tOCI config ***********************" + properties: + configMapName: + type: string + secretName: + type: string + type: object + source: + properties: + k8sADBBackup: + description: 'EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO + OWN! NOTE: json tags are required. Any new fields you add must + have json tags for the fields to be serialized.' + properties: + name: + type: string + type: object + pointInTime: + properties: + timestamp: + description: 'The timestamp must follow this format: YYYY-MM-DD + HH:MM:SS GMT' + type: string + type: object + type: object + target: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + k8sADB: + description: "*********************** *\tADB spec ***********************" + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + required: + - source + - target + type: object + status: + description: AutonomousDatabaseRestoreStatus defines the observed state + of AutonomousDatabaseRestore + properties: + dbName: + type: string + displayName: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + status: + description: 'WorkRequestStatusEnum Enum with underlying type: string' + type: string + timeAccepted: + type: string + timeEnded: + type: string + timeStarted: + type: string + workRequestOCID: + type: string + required: + - dbName + - displayName + - status + - workRequestOCID + type: object + type: object + served: 
true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_autonomousdatabases.yaml b/config/database.oracle.com_autonomousdatabases.yaml new file mode 100644 index 00000000..f77407f3 --- /dev/null +++ b/config/database.oracle.com_autonomousdatabases.yaml @@ -0,0 +1,324 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: autonomousdatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: AutonomousDatabase + listKind: AutonomousDatabaseList + plural: autonomousdatabases + shortNames: + - adb + - adbs + singular: autonomousdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.details.displayName + name: Display Name + type: string + - jsonPath: .spec.details.dbName + name: Db Name + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .spec.details.isDedicated + name: Dedicated + type: string + - jsonPath: .spec.details.cpuCoreCount + name: OCPUs + type: integer + - jsonPath: .spec.details.dataStorageSizeInTBs + name: Storage (TB) + type: integer + - jsonPath: .spec.details.dbWorkload + name: Workload Type + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: AutonomousDatabase is the Schema for the autonomousdatabases + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'AutonomousDatabaseSpec defines the desired state of AutonomousDatabase + Important: Run "make" to regenerate code after modifying this file' + properties: + details: + description: AutonomousDatabaseDetails defines the detail information + of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase + properties: + adminPassword: + properties: + k8sSecret: + description: "*********************** *\tSecret specs ***********************" + properties: + name: + type: string + type: object + ociSecret: + properties: + ocid: + type: string + type: object + type: object + autonomousContainerDatabase: + description: ACDSpec defines the spec of the target for backup/restore + runs. 
The name could be the name of an AutonomousDatabase or + an AutonomousDatabaseBackup + properties: + k8sACD: + description: "*********************** *\tACD specs ***********************" + properties: + name: + type: string + type: object + ociACD: + properties: + ocid: + type: string + type: object + type: object + autonomousDatabaseOCID: + type: string + compartmentOCID: + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + description: 'AutonomousDatabaseDbWorkloadEnum Enum with underlying + type: string' + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + licenseModel: + description: 'AutonomousDatabaseLicenseModelEnum Enum with underlying + type: string' + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + lifecycleState: + description: 'AutonomousDatabaseLifecycleStateEnum Enum with underlying + type: string' + type: string + networkAccess: + properties: + accessControlList: + items: + type: string + type: array + accessType: + enum: + - "" + - PUBLIC + - RESTRICTED + - PRIVATE + type: string + isAccessControlEnabled: + type: boolean + isMTLSConnectionRequired: + type: boolean + privateEndpoint: + properties: + hostnamePrefix: + type: string + nsgOCIDs: + items: + type: string + type: array + subnetOCID: + type: string + type: object + type: object + wallet: + properties: + name: + type: string + password: + properties: + k8sSecret: + description: "*********************** *\tSecret specs + ***********************" + properties: + name: + type: string + type: object + ociSecret: + properties: + ocid: + type: string + type: object + type: object + type: object + type: object + hardLink: + default: false + type: boolean + ociConfig: + description: 
"*********************** *\tOCI config ***********************" + properties: + configMapName: + type: string + secretName: + type: string + type: object + required: + - details + type: object + status: + description: AutonomousDatabaseStatus defines the observed state of AutonomousDatabase + properties: + allConnectionStrings: + items: + properties: + connectionStrings: + items: + properties: + connectionString: + type: string + tnsName: + type: string + type: object + type: array + tlsAuthentication: + type: string + required: + - connectionStrings + type: object + type: array + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. 
For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lifecycleState: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + timeCreated: + type: string + walletExpiringDate: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_cdbs.yaml b/config/database.oracle.com_cdbs.yaml new file mode 100644 index 00000000..6b1c350c --- /dev/null +++ b/config/database.oracle.com_cdbs.yaml @@ -0,0 +1,270 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: cdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: CDB + listKind: CDBList + plural: cdbs + singular: cdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the CDB Resource + jsonPath: .status.phase + name: Status + type: string + - 
description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: CDB is the Schema for the cdbs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CDBSpec defines the desired state of CDB + properties: + cdbAdminPwd: + description: Password for the CDB Administrator to manage PDB lifecycle + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + description: User in the root container with sysdba priviledges to + manage PDB lifecycle + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + description: Name of the CDB + type: string + cdbTlsCrt: + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + description: CDBSecret defines the 
secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + description: DB server port + type: integer + dbServer: + description: Name of the DB server + type: string + dbTnsurl: + type: string + nodeSelector: + additionalProperties: + type: string + description: Node Selector for running the Pod + type: object + ordsImage: + description: ORDS Image Name + type: string + ordsImagePullPolicy: + description: ORDS Image Pull Policy + enum: + - Always + - Never + type: string + ordsImagePullSecret: + description: The name of the image pull secret in case of a private + docker repository. + type: string + ordsPort: + description: ORDS server port. For now, keep it as 8888. TO BE USED + IN FUTURE RELEASE. + type: integer + ordsPwd: + description: Password for user ORDS_PUBLIC_USER + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + replicas: + description: Number of ORDS Containers to create + type: integer + serviceName: + description: Name of the CDB Service + type: string + sysAdminPwd: + description: Password for the CDB System Administrator + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + description: Password for the Web Server User + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + description: Web Server User with SQL Administrator role to allow + us to authenticate to the PDB Lifecycle Management 
REST endpoints + properties: + secret: + description: CDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + description: CDBStatus defines the observed state of CDB + properties: + msg: + description: Message + type: string + phase: + description: Phase of the CDB Resource + type: string + status: + description: CDB Resource Status + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_dataguardbrokers.yaml b/config/database.oracle.com_dataguardbrokers.yaml new file mode 100644 index 00000000..f19a3e22 --- /dev/null +++ b/config/database.oracle.com_dataguardbrokers.yaml @@ -0,0 +1,134 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: dataguardbrokers.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DataguardBroker + listKind: DataguardBrokerList + plural: dataguardbrokers + singular: dataguardbroker + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.primaryDatabase + name: Primary + type: string + - jsonPath: .status.standbyDatabases + name: Standbys + type: string + - jsonPath: .spec.protectionMode + name: Protection Mode + type: string + - jsonPath: .status.clusterConnectString + name: Cluster Connect Str + priority: 1 + type: string + - jsonPath: .status.externalConnectString + name: Connect Str + type: string + - jsonPath: .spec.primaryDatabaseRef + name: Primary Database + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + 
description: DataguardBroker is the Schema for the dataguardbrokers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DataguardBrokerSpec defines the desired state of DataguardBroker + properties: + fastStartFailOver: + properties: + enable: + type: boolean + strategy: + items: + description: FSFO strategy + properties: + sourceDatabaseRef: + type: string + targetDatabaseRefs: + type: string + type: object + type: array + type: object + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + primaryDatabaseRef: + type: string + protectionMode: + enum: + - MaxPerformance + - MaxAvailability + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + setAsPrimaryDatabase: + type: string + standbyDatabaseRefs: + items: + type: string + type: array + required: + - primaryDatabaseRef + - protectionMode + - standbyDatabaseRefs + type: object + status: + description: DataguardBrokerStatus defines the observed state of DataguardBroker + properties: + clusterConnectString: + type: string + externalConnectString: + type: string + primaryDatabase: + type: string + primaryDatabaseRef: + type: string + protectionMode: + type: string + standbyDatabases: + type: string + status: + type: string + type: object + type: object 
+ served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_oraclerestdataservices.yaml b/config/database.oracle.com_oraclerestdataservices.yaml new file mode 100644 index 00000000..121383fd --- /dev/null +++ b/config/database.oracle.com_oraclerestdataservices.yaml @@ -0,0 +1,224 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: oraclerestdataservices.database.oracle.com +spec: + group: database.oracle.com + names: + kind: OracleRestDataService + listKind: OracleRestDataServiceList + plural: oraclerestdataservices + singular: oraclerestdataservice + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .spec.databaseRef + name: Database + type: string + - jsonPath: .status.databaseApiUrl + name: Database API URL + type: string + - jsonPath: .status.databaseActionsUrl + name: Database Actions URL + type: string + - jsonPath: .status.apexUrl + name: Apex URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: OracleRestDataService is the Schema for the oraclerestdataservices + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OracleRestDataServiceSpec defines the desired state of OracleRestDataService + properties: + adminPassword: + description: OracleRestDataServicePassword defines the secret containing + Password mapped to secretKey + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + apexPassword: + description: OracleRestDataServicePassword defines the secret containing + Password mapped to secretKey + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + databaseRef: + type: string + image: + description: OracleRestDataServiceImage defines the Image source and + pullSecrets for POD + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + oracleService: + type: string + ordsPassword: + description: OracleRestDataServicePassword defines the secret containing + Password mapped to secretKey + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + ordsUser: + type: string + persistence: + description: OracleRestDataServicePersistence defines the storage + releated params + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + size: + type: string + storageClass: + type: string + volumeName: + type: string + type: object + replicas: + minimum: 1 + type: integer + restEnableSchemas: + items: + description: OracleRestDataServicePDBSchemas defines the PDB Schemas + to be ORDS 
Enabled + properties: + enable: + type: boolean + pdbName: + type: string + schemaName: + type: string + urlMapping: + type: string + required: + - enable + - schemaName + type: object + type: array + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + required: + - adminPassword + - databaseRef + - ordsPassword + type: object + status: + description: OracleRestDataServiceStatus defines the observed state of + OracleRestDataService + properties: + apexConfigured: + type: boolean + apexUrl: + type: string + commonUsersCreated: + type: boolean + databaseActionsUrl: + type: string + databaseApiUrl: + type: string + databaseRef: + type: string + image: + description: OracleRestDataServiceImage defines the Image source and + pullSecrets for POD + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: string + ordsInstalled: + type: boolean + replicas: + type: integer + serviceIP: + type: string + status: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_pdbs.yaml b/config/database.oracle.com_pdbs.yaml new file mode 100644 index 00000000..85af8c1b --- /dev/null +++ b/config/database.oracle.com_pdbs.yaml @@ -0,0 +1,383 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: pdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: PDB + listKind: PDBList + plural: pdbs + singular: pdb + scope: Namespaced + versions: + - 
additionalPrinterColumns: + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the PDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: PDB is the Schema for the pdbs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PDBSpec defines the desired state of PDB + properties: + action: + description: 'Action to be taken: Create/Clone/Plug/Unplug/Delete/Modify/Status/Map. + Map is used to map a Databse PDB to a Kubernetes PDB CR.' + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + type: string + adminName: + description: The administrator username for the new PDB. This property + is required when the Action property is Create. 
+ properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + description: The administrator password for the new PDB. This property + is required when the Action property is Create. + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + asClone: + description: Indicate if 'AS CLONE' option should be used in the command + to plug in a PDB. This property is applicable when the Action property + is PLUG but not required. + type: boolean + assertivePdbDeletion: + description: turn on the assertive approach to delete pdb resource + kubectl delete pdb ..... automatically triggers the pluggable database + deletion + type: boolean + cdbName: + description: Name of the CDB + type: string + cdbNamespace: + description: CDB Namespace + type: string + cdbResName: + description: Name of the CDB Custom Resource that runs the ORDS container + type: string + copyAction: + description: To copy files or not while cloning a PDB + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + description: Specify if datafiles should be removed or not. The value + can be INCLUDING or KEEP (default). + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + description: Relevant for Create and Plug operations. As defined in + the Oracle Multitenant Database documentation. Values can be a + filename convert pattern or NONE. 
+ type: string + getScript: + description: Whether you need the script only or execute the script + type: boolean + modifyOption: + description: Extra options for opening and closing a PDB + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + pdbName: + description: The name of the new PDB. Relevant for both Create and + Plug Actions. + type: string + pdbState: + description: The target state of the PDB + enum: + - OPEN + - CLOSE + type: string + pdbTlsCat: + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsCrt: + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsKey: + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + reuseTempFile: + description: Whether to reuse temp file + type: boolean + sourceFileNameConversions: + description: This property is required when the Action property is + Plug. As defined in the Oracle Multitenant Database documentation. + Values can be a source filename convert pattern or NONE. + type: string + sparseClonePath: + description: A Path specified for sparse clone snapshot copy. (Optional) + type: string + srcPdbName: + description: Name of the Source PDB from which to clone + type: string + tdeExport: + description: TDE export for unplug operations + type: boolean + tdeImport: + description: TDE import for plug operations + type: boolean + tdeKeystorePath: + description: TDE keystore path is required if the tdeImport or tdeExport + flag is set to true. 
Can be used in plug or unplug operations. + type: string + tdePassword: + description: TDE password if the tdeImport or tdeExport flag is set + to true. Can be used in create, plug or unplug operations + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + description: TDE secret is required if the tdeImport or tdeExport + flag is set to true. Can be used in plug or unplug operations. + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + description: Relevant for Create and Clone operations. Total size + for temporary tablespace as defined in the Oracle Multitenant Database + documentation. See size_clause description in Database SQL Language + Reference documentation. + type: string + totalSize: + description: Relevant for create and plug operations. Total size as + defined in the Oracle Multitenant Database documentation. See size_clause + description in Database SQL Language Reference documentation. + type: string + unlimitedStorage: + description: Relevant for Create and Plug operations. True for unlimited + storage. Even when set to true, totalSize and tempSize MUST be specified + in the request if Action is Create. 
+ type: boolean + webServerPwd: + description: Password for the Web ServerPDB User + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + description: Web Server User with SQL Administrator role to allow + us to authenticate to the PDB Lifecycle Management REST endpoints + properties: + secret: + description: PDBSecret defines the secretName + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + description: XML metadata filename to be used for Plug or Unplug operations + type: string + required: + - action + type: object + status: + description: PDBStatus defines the observed state of PDB + properties: + action: + description: Last Completed Action + type: string + connString: + description: PDB Connect String + type: string + modifyOption: + description: Modify Option of the PDB + type: string + msg: + description: Message + type: string + openMode: + description: Open mode of the PDB + type: string + phase: + description: Phase of the PDB Resource + type: string + status: + description: PDB Resource Status + type: boolean + totalSize: + description: Total size of the PDB + type: string + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_shardingdatabases.yaml b/config/database.oracle.com_shardingdatabases.yaml new file mode 100644 index 00000000..641629a0 --- /dev/null +++ b/config/database.oracle.com_shardingdatabases.yaml @@ -0,0 +1,688 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + 
controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: shardingdatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: ShardingDatabase + listKind: ShardingDatabaseList + plural: shardingdatabases + singular: shardingdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.gsm.state + name: Gsm State + type: string + - jsonPath: .status.gsm.services + name: Services + type: string + - jsonPath: .status.gsm.shards + name: shards + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ShardingDatabase is the Schema for the shardingdatabases API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ShardingDatabaseSpec defines the desired state of ShardingDatabase + properties: + InvitedNodeSubnet: + type: string + catalog: + items: + description: CatalogSpec defines the desired state of CatalogSpec + properties: + envVars: + items: + description: EnvironmentVariable represents a named variable + accessible for containers. 
+ properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + dbEdition: + type: string + dbImage: + type: string + dbImagePullSecret: + type: string + dbSecret: + description: Secret Details + properties: + encryptionType: + type: string + keyFileMountLocation: + type: string + keyFileName: + type: string + keySecretName: + type: string + name: + type: string + nsConfigMap: + type: string + nsSecret: + type: string + pwdFileMountLocation: + type: string + pwdFileName: + type: string + required: + - name + - pwdFileName + type: object + fssStorageClass: + type: string + gsm: + items: + description: GsmSpec defines the desired state of GsmSpec + properties: + directorName: + type: string + envVars: + description: Replicas int32 `json:"replicas,omitempty"` // + Gsm Replicas. If you set OraGsmPvcName then it is set default + to 1. + items: + description: EnvironmentVariable represents a named variable + accessible for containers. 
+ properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + region: + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + gsmDevMode: + type: string + gsmImage: + type: string + gsmImagePullSecret: + type: string + gsmService: + items: + description: Service Definition + properties: + available: + type: string + clbGoal: + type: string + commitOutcome: + type: string + drainTimeout: + type: string + dtp: + type: string + edition: + type: string + failoverDelay: + type: string + failoverMethod: + type: string + failoverPrimary: + type: string + failoverRestore: + type: string + failoverRetry: + type: string + failoverType: + type: string + gdsPool: + type: string + lag: + type: integer + locality: + type: string + name: + type: string + notification: + type: string + pdbName: + type: string + policy: + type: string + preferred: + type: string + prferredAll: + type: string + regionFailover: + type: string + retention: + type: string + role: + type: string + sessionState: + type: string + sqlTransactionProfile: + type: string + stopOption: + type: string + tableFamily: + type: string + tfaPolicy: + type: string + required: + - name + type: object + type: array + gsmShardGroup: + items: + properties: + deployAs: + type: string + name: + type: string + region: + type: 
string + required: + - name + type: object + type: array + gsmShardSpace: + items: + description: ShardSpace Specs + properties: + chunks: + type: integer + name: + type: string + protectionMode: + type: string + shardGroup: + type: string + required: + - name + type: object + type: array + invitedNodeSubnetFlag: + type: string + isClone: + type: boolean + isDataGuard: + type: boolean + isDebug: + type: boolean + isDeleteOraPvc: + type: boolean + isDownloadScripts: + type: boolean + isExternalSvc: + type: boolean + isTdeWallet: + type: string + liveinessCheckPeriod: + type: integer + namespace: + type: string + portMappings: + items: + description: PortMapping is a specification of port mapping for + an application deployment. + properties: + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + format: int32 + type: integer + required: + - port + - protocol + - targetPort + type: object + type: array + readinessCheckPeriod: + type: integer + replicationType: + type: string + scriptsLocation: + type: string + shard: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + items: + description: ShardSpec is a specification of Shards for an application + deployment. + properties: + deployAs: + type: string + envVars: + items: + description: EnvironmentVariable represents a named variable + accessible for containers. 
+ properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + isDelete: + enum: + - enable + - disable + - failed + - force + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + shardBuddyRegion: + type: string + shardConfigName: + type: string + shardRegion: + items: + type: string + type: array + shardingType: + type: string + stagePvcName: + type: string + storageClass: + type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string + required: + - catalog + - dbImage + - gsm + - gsmImage + - shard + type: object + status: + description: To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 + ShardingDatabaseStatus defines the observed state of ShardingDatabase + properties: + catalogs: + additionalProperties: + type: string + type: object + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. 
\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + gsm: + properties: + details: + additionalProperties: + type: string + type: object + externalConnectStr: + type: string + internalConnectStr: + type: string + services: + type: string + shards: + additionalProperties: + type: string + type: object + state: + type: string + type: object + shards: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/database.oracle.com_singleinstancedatabases.yaml b/config/database.oracle.com_singleinstancedatabases.yaml new file mode 100644 index 00000000..1c011e17 --- /dev/null +++ b/config/database.oracle.com_singleinstancedatabases.yaml @@ -0,0 +1,421 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: singleinstancedatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: SingleInstanceDatabase + listKind: SingleInstanceDatabaseList + plural: singleinstancedatabases + singular: singleinstancedatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.edition + name: Edition + type: string + - jsonPath: .status.sid + name: Sid + priority: 1 + type: 
string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.role + name: Role + type: string + - jsonPath: .status.releaseUpdate + name: Version + type: string + - jsonPath: .status.connectString + name: Connect Str + type: string + - jsonPath: .status.pdbConnectString + name: Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.tcpsConnectString + name: TCPS Connect Str + type: string + - jsonPath: .status.tcpsPdbConnectString + name: TCPS Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.oemExpressUrl + name: Oem Express Url + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: SingleInstanceDatabase is the Schema for the singleinstancedatabases + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SingleInstanceDatabaseSpec defines the desired state of SingleInstanceDatabase + properties: + adminPassword: + description: SingleInsatnceAdminPassword defines the secret containing + Admin Password mapped to secretKey for Database + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + archiveLog: + type: boolean + charset: + type: string + createAs: + enum: + - primary + - standby + - clone + type: string + dgBrokerConfigured: + type: boolean + edition: + enum: + - standard + - enterprise + - express + - free + type: string + enableTCPS: + type: boolean + flashBack: + type: boolean + forceLog: + type: boolean + image: + description: SingleInstanceDatabaseImage defines the Image source + and pullSecrets for POD + properties: + prebuiltDB: + type: boolean + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + initParams: + description: SingleInstanceDatabaseInitParams defines the Init Parameters + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + listenerPort: + type: integer + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + pdbName: + type: string + persistence: + description: SingleInstanceDatabasePersistence defines the storage + size and class for PVC + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + 
primaryDatabaseRef: + type: string + readinessCheckPeriod: + type: integer + replicas: + type: integer + resources: + properties: + limits: + properties: + cpu: + type: string + memory: + type: string + type: object + requests: + properties: + cpu: + type: string + memory: + type: string + type: object + type: object + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + sid: + description: SID must be alphanumeric (no special characters, only + a-z, A-Z, 0-9), and no longer than 12 characters. + maxLength: 12 + pattern: ^[a-zA-Z0-9]+$ + type: string + tcpsCertRenewInterval: + type: string + tcpsListenerPort: + type: integer + tcpsTlsSecret: + type: string + required: + - image + type: object + status: + description: SingleInstanceDatabaseStatus defines the observed state of + SingleInstanceDatabase + properties: + apexInstalled: + type: boolean + archiveLog: + type: string + certCreationTimestamp: + type: string + certRenewInterval: + type: string + charset: + type: string + clientWalletLoc: + type: string + clusterConnectString: + type: string + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. 
This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + connectString: + type: string + createdAs: + type: string + datafilesCreated: + default: "false" + type: string + datafilesPatched: + default: "false" + type: string + dgBrokerConfigured: + type: boolean + edition: + type: string + flashBack: + type: string + forceLog: + type: string + initParams: + description: SingleInstanceDatabaseInitParams defines the Init Parameters + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + initPgaSize: + type: integer + initSgaSize: + type: integer + isTcpsEnabled: + default: false + type: boolean + nodes: + items: + type: string + type: array + oemExpressUrl: + type: string + ordsReference: + type: string + pdbConnectString: + type: string + pdbName: + type: string + persistence: + description: SingleInstanceDatabasePersistence defines the storage + size and class for PVC + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + prebuiltDB: + type: boolean + primaryDatabase: + type: string + releaseUpdate: + type: string + replicas: + type: integer + role: + type: string + sid: + type: string + standbyDatabases: + additionalProperties: + type: string + type: object + status: + type: string + tcpsConnectString: + type: string + tcpsPdbConnectString: + type: string + tcpsTlsSecret: + default: 
"" + type: string + required: + - isTcpsEnabled + - persistence + - tcpsTlsSecret + type: object + type: object + served: true + storage: true + subresources: + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index e7ed68ce..2aed83d4 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -8,5 +8,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: phx.ocir.io/intsanjaysingh/db-repo/oracle/database - newTag: sharding-operator + newName: container-registry.oracle.com/database/operator + newTag: latest diff --git a/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml b/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml index 6ec37dd1..23cd7c00 100644 --- a/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml @@ -3,25 +3,98 @@ kind: ClusterServiceVersion metadata: annotations: alm-examples: '[]' - capabilities: Basic Install + capabilities: Seamless Upgrades operators.operatorframework.io/builder: operator-sdk-v1.2.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v2 - name: oracle-database-operator.v0.0.0 - namespace: placeholder + name: oracle-database-operator.v1.1.0 + namespace: oracle-database-operator-system spec: apiservicedefinitions: {} customresourcedefinitions: owned: + - description: DbcsSystem is the Schema for the dbcssystems API + displayName: Dbcs System + kind: DbcsSystem + name: DbcsSystem.database.oracle.com + version: v1alpha1 + - description: AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases + API + displayName: Autonomous Container Database + kind: 
AutonomousContainerDatabase + name: autonomouscontainerdatabases.database.oracle.com + version: v1alpha1 + - description: AutonomousDatabaseBackup is the Schema for the autonomousdatabasebackups + API + displayName: Autonomous Database Backup + kind: AutonomousDatabaseBackup + name: autonomousdatabasebackups.database.oracle.com + version: v1alpha1 + - description: AutonomousDatabaseRestore is the Schema for the autonomousdatabaserestores + API + displayName: Autonomous Database Restore + kind: AutonomousDatabaseRestore + name: autonomousdatabaserestores.database.oracle.com + version: v1alpha1 - description: AutonomousDatabase is the Schema for the autonomousdatabases API displayName: Autonomous Database kind: AutonomousDatabase name: autonomousdatabases.database.oracle.com version: v1alpha1 - description: Operator to manage Oracle sharding - displayName: Oracle Sharding DB Operator + - description: CDB is the Schema for the cdbs API + displayName: CDB + kind: CDB + name: cdbs.database.oracle.com + version: v1alpha1 + - description: DatabaseObserver is the Schema for the databaseobservers API + displayName: Database Observer + kind: DatabaseObserver + name: databaseobservers.observability.oracle.com + version: v1alpha1 + - description: DataguardBroker is the Schema for the dataguardbrokers API + displayName: Dataguard Broker + kind: DataguardBroker + name: dataguardbrokers.database.oracle.com + version: v1alpha1 + - description: OracleRestDataService is the Schema for the oraclerestdataservices + API + displayName: Oracle Rest Data Service + kind: OracleRestDataService + name: oraclerestdataservices.database.oracle.com + version: v1alpha1 + - description: PDB is the Schema for the pdbs API + displayName: PDB + kind: PDB + name: pdbs.database.oracle.com + version: v1alpha1 + - description: ShardingDatabase is the Schema for the shardingdatabases API + displayName: Sharding Database + kind: ShardingDatabase + name: shardingdatabases.database.oracle.com + version: 
v1alpha1 + - description: SingleInstanceDatabase is the Schema for the singleinstancedatabases + API + displayName: Single Instance Database + kind: SingleInstanceDatabase + name: singleinstancedatabases.database.oracle.com + version: v1alpha1 + description: | + As part of Oracle's resolution to make Oracle Database Kubernetes native (that is, observable and operable by Kubernetes), Oracle released Oracle Database Operator for Kubernetes (OraOperator or the operator). OraOperator extends the Kubernetes API with custom resources and controllers for automating Oracle Database lifecycle management. + In this v1.1.0 production release, OraOperator supports the following database configurations and infrastructure: + ## Oracle Autonomous Database: + * Oracle Autonomous Database shared Oracle Cloud Infrastructure (OCI) (ADB-S) + * Oracle Autonomous Database on dedicated Cloud infrastructure (ADB-D) + * Oracle Autonomous Container Database (ACD) (infrastructure) is the infrastructure for provisioning Autonomous Databases. + * Containerized Single Instance databases (SIDB) deployed in the Oracle Kubernetes Engine (OKE) and any k8s where OraOperator is deployed + * Containerized Sharded databases (SHARDED) deployed in OKE and any k8s where OraOperator is deployed + * Oracle Multitenant Databases (CDB/PDBs) + * Oracle Base Database Cloud Service (BDBCS) + * Oracle Data Guard (Preview status) + * Oracle Database Observability (Preview status) + * Oracle will continue to extend OraOperator to support additional Oracle Database configurations. 
+ displayName: Oracle Database Operator icon: - - base64data: "" - mediatype: "" + - base64data: iVBORw0KGgoAAAANSUhEUgAAALQAAAC0CAYAAAA9zQYyAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAABmJLR0QAAAAAAAD5Q7t/AAAJjUlEQVR42u3cfcwcRQHH8S9PH0BokZfCVBgpOgjyFjRoQIQQkLeA0PLWqgQMFDVgja9AChIKKCEKSgQEQVsQJGKxtNCAvAi2vJiCqAQMUpQRMKM4vFiCQEUo/jH7kOt19m7vbveK8fdJLukzMzuzczc7OzszWxAREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREZH/X2tVSRStmwi8B5gErN1nWS8DAVhmgl9ZsdwpVc+xoteKc/iTCf7VujKN1o0A+xef5cDPTfCP1XjeY+VsAWwFTATGDZjdPSb4F6J1U9sjTPA31n3uXeq1MfBe4F30376ADo0lWjcBOAa4EHhHzXW4FzgDuNsE/2aHc3gJmFBz2WMuBc4ywT87SCbRunWAB4APtEV92gR/zaAnGa2bBJwInFVz/SeY4F+O1q32/Zvg6+xEyuq1PnA08F1gg7ryHSkpbDrwEnAZ9TdmgD2BxcDj0bptG8i/is8DMVq394D5zGb1xgxwdbRum34zjdaNi9adAjxD/Y15PxP8yzXn2UvdDiPdsX9IjY0ZOHOVKzFaNw64Apgx5DoelrvNNdxDt9rDBP/rXg+K1u0EPNIhyWPADp3uQiX5bggsIX+hDOp6E/z0lrKG1kMXQ7OLgJkNZL8SWO+tHjpatxbwM4bfmAEWRuuOWAPljrmvGGJVVlz8d3ZJth1wXI/5jgeW0UxjhnRnGrqifV1FM40ZYDcT/GujLQGnAUd2OGAu6Qd8oY/CxgGTgWOB3UvSzI/W7WiCf7RLXgcNUOnJwOUlcScCF/SQ10mAqZBubrTuVhP837sljNZB6lQmdUh2AbCUdMvu1Ssm+Of6OK4OXyb9/mV+DNxOf+0LE/yDUDwURuu2A/5YknYWcLEJ/pU6ahWt2wq4FtgjE/08YMZmQUqGHHua4O8boPx1gNuAvTPRI1WGB0UdnsxEPQzsnAm/HTjQBN8t36nAwpLoTwDzTfBv9Fv3kjIbH3JE6xzwREn0bOA7dY3px4Ycl5bEH2CC/1ZdjRnABP8UsBfwg0z0RODgusoqKf810uxNzsbdjm/pRXOOIP1A7Q4ADu2S7yjljXl7E/y8uhvzEF1YEj7FBH9OnQ+oI9G6zYF9MnEzTfB3NFG7ogf+ApDrsr7XRJlt/lYSvmGFY6cDu2XCZ5ngnwC+XXLcTcXDXpmyodheTcxpD0u0bjNgSibqZBP8orrLGyH1HjlXNFnRorc5KhPlioWcJo2WhL/e6aBo3abke+eVFL2QCX4F+eEMpDnXMsdnwu4ywd/T8HfRtLLv4pImChsFDsmEn2aCf73XzPrwUEn4+4Gep9F68LGS8G4PTN8vCf9oMZQBwAS/JFp3PTCtLd2MaN3ckmeAXIP+ZoPfwbCUDSFXFMO3Om03QlrkaLd4GDUtHsByPd7WTZUZrdsLuDUTdX+n5fBo3b6k4Ua7K0zw92fCy6an7o3WVV2seqRiurez3QfPopI5Jvhlo+SniPqaOunT05mwjmPZaN2WwKPAmy0fOvw99u/NOmR7aofyxgO/7OU4E/yz0brjSHOv7c4oPt3U9jC+Bk0aPItKvgZpDJ27zVZ5OKrLFpmwlzodYIL/K2k+egPgncX5bghsRJqp2KT4TAQ2LT6dGvONJvi7O8R/oyR
8qgn+xQ7HXUP+wffrxSpjN+v195W+rQxj3vuQsd9hhDRJ326P3vLrTzGGOjoT9Zdux5rg7yWtxA3qsZJzGDvHXYCvZKIWAzd1OceVlE/X3VmsNnayfQ31W9MeaDj/W0zwN4/9MQosYvUv/YJo3UVVt3kOYAfyO/4qTVOZ4JcVu9EeAjbvo/xLSNNH/85FRuvWBsp67guBKRUfbM4jrcSucvqk1caxp/3rgE+2pfkqaWfi/7JfkO8w1m5i4mGUtGrWbhzwKdKKXiOKtf0rM1HPAbFqPib4GK17H2lRYv8Kh7xIWkj6kQned0n7JWB8JnwmaSfiFgzm4mjdomKxaQ6rN+jDo3UfMsH/dsBy1qSy/S4zaGBqeMQE/zTwu0zcT6J1u/WaYRVFr3YOsGsmema3JeJ2xUrmQcDFHZKdU9R3IxP86d0ac7Rua+D8XBRpP0iVi6eKecXFXXYneDBaN7mmsoau2MOyJBN1eTHjVKuxpe/PlMQvjdadUGGsV1mxcjSP8qf8Bf3ka4J/wwT/RdImmJwzSbv6uj5oFQ1sYUn0fkVZjwJn1/CV7ApM77Ik/1S0rq4LaE0o2+G3JFp3UjG0q8Vb49do3fnAyR3SngvcQ3rFqFfjAEtaaJjWId2uJvjftJxTX5uTonWHAzeURQMf7LT7LVp3LHB1Jup8E/ypLenWBVb08X3kbEbanLWU/J0L0uzP2aQ76qBTeq+a4B/ObU4CPlJTnVodTOpUypxH6smXD1JIa4MeBX5FfqFlGGaY4FcZUw+y264YLi3tkGQXE/zvM8dNIr0lkjO+faNWtG5n0sXai1syYfNN8EdF6zYhNeymHWmCv6GkQdftKtIo4GbgwCYLan9jZV1gPvDxIVSy1fEm+KvaAwfdPlqMg//cIcnhJviFbccsIr8dYD8TfLcN/ZVE6y4HPpeJOsAEf0dxUf2BNH/ehMXAPiZ4htSgNzbBLy+27l5Lfg9PLVZ5p7CYvjqU4b3V8B/gw7nGXIdi99umlO/FXRCtmzU29RatO4h8Y15QV2MuzCoJvz1aN8EE/w9gS3p74aAXx/T64D2AKSb45fDW1t1plD+zDWrf1V6SNcG/aYK/jDSmm917npU8Tdo7vH7TU1Im+OeBnShfBDkP+Gm0bgPyQwFIb7PUeU7/pHwx59wizQoT/CmAI793vF+fNcGHlr+P7zun7m4jrXO01h0T/BzSKu7pNZZ1nQn+rqr/L8dkYFvSvGu/T6T/Ap4CHjfBV9orEq07IRN8pwn+yV4LL17Q7PTj3UV+F95zTfw/FcVMSvb9zeIHz53/1sA2pEWZfmeermxfMIvW7Uh6EB3pL8tSN5vgn+mWqNibsw3wbvpvXwuqtisREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREREWnxX2ox1/vZSvwPAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDI0LTA4LTEzVDE5OjUyOjMxKzAwOjAwsDIMcAAAACV0RVh0ZGF0ZTptb2RpZnkAMjAyNC0wOC0xM1QxOTo1MjozMSswMDowMMFvtMwAAABVdEVYdHN2Zzpjb21tZW50ACBVcGxvYWRlZCB0bzogU1ZHIFJlcG8sIHd3dy5zdmdyZXBvLmNvbSwgR2VuZXJhdG9yOiBTVkcgUmVwbyBNaXhlciBUb29scyBFB1wTAAAAAElFTkSuQmCC + mediatype: png install: spec: deployments: null @@ -37,12 +110,12 @@ spec: type: AllNamespaces keywords: - Oracle - - sharding - - db + - Database + - Operator links: - name: Oracle Database Operator 
- url: https://oracle-database-operator.domain + url: https://github.com/oracle/oracle-database-operator maturity: alpha provider: - name: ShardingDatabase - version: 0.0.0 + name: Oracle + version: 1.2.0 diff --git a/config/observability.oracle.com_databaseobservers.yaml b/config/observability.oracle.com_databaseobservers.yaml new file mode 100644 index 00000000..b0801738 --- /dev/null +++ b/config/observability.oracle.com_databaseobservers.yaml @@ -0,0 +1,227 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: databaseobservers.observability.oracle.com +spec: + group: observability.oracle.com + names: + kind: DatabaseObserver + listKind: DatabaseObserverList + plural: databaseobservers + singular: databaseobserver + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: DatabaseObserver is the Schema for the databaseobservers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseObserverSpec defines the desired state of DatabaseObserver + properties: + database: + description: DatabaseObserverDatabase defines the database details + used for DatabaseObserver + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + description: DatabaseObserverExporterConfig defines the configuration + details related to the exporters of DatabaseObserver + properties: + configuration: + properties: + configmap: + description: ConfigMapDetails defines the configmap name + properties: + configmapName: + type: string + key: + type: string + type: object + type: object + image: + type: string + service: + description: DatabaseObserverService defines the exporter service + component of DatabaseObserver + properties: + port: + format: int32 + type: integer + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + description: PrometheusConfig defines the generated resources for + Prometheus + properties: + labels: + additionalProperties: + type: string + type: object + port: + type: string + type: object + replicas: + format: int32 + type: integer + type: object + status: + description: DatabaseObserverStatus defines the observed state of DatabaseObserver + properties: + conditions: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to 
regenerate code after modifying + this file' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + required: + - conditions + - exporterConfig + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 3bf0eab5..ac5e158f 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -5,17 +5,29 @@ ## Append samples you want in your CSV to this file as resources ## resources: - - onpremdb/pdb.yaml - - onpremdb/cdb.yaml - - adb/autonomousdatabase_create.yaml + - multitenant/pdb_plug.yaml + - multitenant/cdb_secret.yaml + - multitenant/pdb_secret.yaml + - multitenant/pdb_clone.yaml + - multitenant/cdb.yaml + - sidb/singleinstancedatabase_patch.yaml + - sidb/oraclerestdataservice_apex.yaml + - sidb/singleinstancedatabase_express.yaml + - sidb/singleinstancedatabase_secrets.yaml + - sidb/singleinstancedatabase_clone.yaml + - sidb/singleinstancedatabase_prebuiltdb.yaml + - sidb/dataguardbroker.yaml + - 
sidb/oraclerestdataservice_secrets.yaml + - sidb/singleinstancedatabase_free.yaml + - sidb/singleinstancedatabase_standby.yaml + - sidb/openshift_rbac.yaml + - sharding/sharding_v1alpha1_provshard_clonespec1.yaml + - sharding/shardingdatabase.yaml + - sharding/sharding_v1alpha1_provshard_clonespec.yaml + - observability/databaseobserver_vault.yaml + - observability/databaseobserver_minimal.yaml - adb/autonomousdatabase_bind.yaml - adb/autonomousdatabase_backup.yaml - adb/autonomousdatabase_restore.yaml - - acd/autonomouscontainerdatabase_create.yaml - - sidb/singleinstancedatabase.yaml - - sharding/shardingdatabase.yaml - - sharding/sharding_v1alpha1_provshard.yaml - - dbcs/database_v1alpha1_dbcssystem.yaml - - database_v1alpha1_dataguardbroker.yaml - - observability/databaseobserver.yaml + - acd/autonomouscontainerdatabase_restart_terminate.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/sidb/singleinstancedatabase.yaml b/config/samples/sidb/singleinstancedatabase.yaml index b66082e1..4425acea 100644 --- a/config/samples/sidb/singleinstancedatabase.yaml +++ b/config/samples/sidb/singleinstancedatabase.yaml @@ -103,6 +103,21 @@ spec: ## Optionally specify a volume containing scripts in 'setup' and 'startup' folders to be executed during database setup and startup respectively. scriptsVolumeName: "" + ## Database pod resource details + ## cpu can be expressed in terms of cpu units and can be a plain integer or fractional value + ## memory is measured in bytes and can be expressed in plain integer or as a fixed-point number + ## using one of these quantity suffixes: E, P, T, G, M, k. + ## You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. + resources: + ## requests denotes minimum node resources required/to be utilized by the database pod + requests: + cpu: + memory: + ## limits specifies the maximum node resources that can be utilized by the database pod + limits: + cpu: + memory: + ## Type of service . 
Applicable on cloud enviroments only ## if loadBalService : false, service type = "NodePort" else "LoadBalancer" loadBalancer: false diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 175abd47..c150867f 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -130,6 +130,26 @@ webhooks: resources: - pdbs sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v1alpha1-shardingdatabase + failurePolicy: Fail + name: mshardingdatabase.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - shardingdatabases + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -343,6 +363,27 @@ webhooks: resources: - pdbs sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v1alpha1-shardingdatabase + failurePolicy: Fail + name: vshardingdatabase.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - shardingdatabases + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 diff --git a/controllers/database/pdb_controller.go b/controllers/database/pdb_controller.go index 5c173740..f0b4fd46 100644 --- a/controllers/database/pdb_controller.go +++ b/controllers/database/pdb_controller.go @@ -44,6 +44,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + //"encoding/pem" "errors" "fmt" @@ -59,6 +60,7 @@ import ( "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -119,10 +121,13 @@ var ( ) const PDBFinalizer = "database.oracle.com/PDBfinalizer" +const ONE = 1 +const ZERO = 
0 var tdePassword string var tdeSecret string var floodcontrol bool = false +var assertivePdbDeletion bool = false /* Global variable for assertive pdb deletion */ //+kubebuilder:rbac:groups=database.oracle.com,resources=pdbs,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=database.oracle.com,resources=pdbs/status,verbs=get;update;patch @@ -133,7 +138,6 @@ var floodcontrol bool = false // +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups='',resources=statefulsets/finalizers,verbs=get;list;watch;create;update;patch;delete - // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by @@ -183,9 +187,9 @@ func (r *PDBReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R } // Finalizer section - err = r.managePDBDeletion(ctx, req, pdb) + err = r.managePDBDeletion2(ctx, req, pdb) if err != nil { - log.Info("Reconcile queued") + log.Info("managePDBDeletion2 Error Deleting resource ") return requeueY, nil } @@ -652,6 +656,9 @@ func (r *PDBReconciler) createPDB(ctx context.Context, req ctrl.Request, pdb *db return nil } + pdbAdminName = strings.TrimSuffix(pdbAdminName, "\n") + pdbAdminPwd = strings.TrimSuffix(pdbAdminPwd, "\n") + values := map[string]string{ "method": "CREATE", "pdb_name": pdb.Spec.PDBName, @@ -681,7 +688,6 @@ func (r *PDBReconciler) createPDB(ctx context.Context, req ctrl.Request, pdb *db values["tdeSecret"] = tdeSecret } - //url := "https://"+ pdb.Spec.CDBNamespace + "." + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" pdb.Status.TotalSize = pdb.Spec.TotalSize @@ -705,6 +711,10 @@ func (r *PDBReconciler) createPDB(ctx context.Context, req ctrl.Request, pdb *db ParseTnsAlias(&(pdb.Status.ConnString), &(pdb.Spec.PDBName)) } + assertivePdbDeletion = pdb.Spec.AssertivePdbDeletion + if pdb.Spec.AssertivePdbDeletion == true { + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Created", "PDB '%s' assertive pdb deletion turned on", pdb.Spec.PDBName) + } log.Info("New connect strinng", "tnsurl", cdb.Spec.DBTnsurl) log.Info("Created PDB Resource", "PDB Name", pdb.Spec.PDBName) r.getPDBState(ctx, req, pdb) @@ -786,6 +796,11 @@ func (r *PDBReconciler) clonePDB(ctx context.Context, req ctrl.Request, pdb *dba ParseTnsAlias(&(pdb.Status.ConnString), &(pdb.Spec.PDBName)) } + assertivePdbDeletion = pdb.Spec.AssertivePdbDeletion + if pdb.Spec.AssertivePdbDeletion == true { + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Clone", "PDB '%s' assertive pdb deletion turned on", pdb.Spec.PDBName) + } + log.Info("Cloned PDB successfully", "Source PDB Name", pdb.Spec.SrcPDBName, "Clone PDB Name", pdb.Spec.PDBName) r.getPDBState(ctx, req, pdb) return nil @@ -845,8 +860,6 @@ func (r *PDBReconciler) plugPDB(ctx context.Context, req ctrl.Request, pdb *dbap values["asClone"] = strconv.FormatBool(*(pdb.Spec.AsClone)) } - //url := "https://"+ pdb.Spec.CDBNamespace + "." + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" - //url := "https://" + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" pdb.Status.TotalSize = pdb.Spec.TotalSize @@ -868,6 +881,11 @@ func (r *PDBReconciler) plugPDB(ctx context.Context, req ctrl.Request, pdb *dbap pdb.Status.ConnString = cdb.Spec.DBTnsurl } + assertivePdbDeletion = pdb.Spec.AssertivePdbDeletion + if pdb.Spec.AssertivePdbDeletion == true { + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Plugged", "PDB '%s' assertive pdb deletion turned on", pdb.Spec.PDBName) + } + log.Info("Successfully plugged PDB", "PDB Name", pdb.Spec.PDBName) r.getPDBState(ctx, req, pdb) return nil @@ -915,7 +933,6 @@ func (r *PDBReconciler) unplugPDB(ctx context.Context, req ctrl.Request, pdb *db values["tdeExport"] = strconv.FormatBool(*(pdb.Spec.TDEExport)) } - //url := "https://" + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdb.Spec.PDBName + "/" url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdb.Spec.PDBName + "/" log.Info("CallAPI(url)", "url", url) @@ -1011,7 +1028,6 @@ func (r *PDBReconciler) modifyPDB(ctx context.Context, req ctrl.Request, pdb *db log.Info("PDB STATUS OPENMODE", "pdb.Status.OpenMode=", pdb.Status.OpenMode) pdbName := pdb.Spec.PDBName - //url := "https://" + pdb.Spec.CDBNamespace + "." + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" pdb.Status.Phase = pdbPhaseModify @@ -1055,8 +1071,6 @@ func (r *PDBReconciler) getPDBState(ctx context.Context, req ctrl.Request, pdb * } pdbName := pdb.Spec.PDBName - //url := "https://"+ pdb.Spec.CDBNamespace + "." 
+ pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" - //url := "https://" + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" pdb.Status.Msg = "Getting PDB state" @@ -1108,8 +1122,6 @@ func (r *PDBReconciler) mapPDB(ctx context.Context, req ctrl.Request, pdb *dbapi } pdbName := pdb.Spec.PDBName - //url := "https://"+ pdb.Spec.CDBNamespace + "." + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/" - //url := "https://" + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/" url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/" pdb.Status.Msg = "Mapping PDB" @@ -1129,12 +1141,15 @@ func (r *PDBReconciler) mapPDB(ctx context.Context, req ctrl.Request, pdb *dbapi log.Error(err, "Failed to get state of PDB :"+pdbName, "err", err.Error()) } - //fmt.Printf("%+v\n", objmap) totSizeInBytes := objmap["total_size"].(float64) totSizeInGB := totSizeInBytes / 1024 / 1024 / 1024 pdb.Status.OpenMode = objmap["open_mode"].(string) pdb.Status.TotalSize = fmt.Sprintf("%.2f", totSizeInGB) + "G" + assertivePdbDeletion = pdb.Spec.AssertivePdbDeletion + if pdb.Spec.AssertivePdbDeletion == true { + r.Recorder.Eventf(pdb, corev1.EventTypeNormal, "Mapped", "PDB '%s' assertive pdb deletion turned on", pdb.Spec.PDBName) + } if cdb.Spec.DBServer != "" { pdb.Status.ConnString = cdb.Spec.DBServer + ":" + strconv.Itoa(cdb.Spec.DBPort) + "/" + pdb.Spec.PDBName @@ -1188,49 +1203,72 @@ func (r *PDBReconciler) 
deletePDB(ctx context.Context, req ctrl.Request, pdb *db return nil } -/* -************************************************ +/************************************************* - Check PDB deletion - /*********************************************** -*/ -func (r *PDBReconciler) managePDBDeletion(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { - log := r.Log.WithValues("managePDBDeletion", req.NamespacedName) - - // Check if the PDB instance is marked to be deleted, which is - // indicated by the deletion timestamp being set. - isPDBMarkedToBeDeleted := pdb.GetDeletionTimestamp() != nil - if isPDBMarkedToBeDeleted { - log.Info("Marked to be deleted") - pdb.Status.Phase = pdbPhaseDelete - pdb.Status.Status = true - r.Status().Update(ctx, pdb) +**************************************************/ +func (r *PDBReconciler) managePDBDeletion2(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { + log := r.Log.WithValues("managePDBDeletion", req.NamespacedName) + if pdb.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(pdb, PDBFinalizer) { + controllerutil.AddFinalizer(pdb, PDBFinalizer) + if err := r.Update(ctx, pdb); err != nil { + return err + } + } + } else { + log.Info("Pdb marked to be delted") if controllerutil.ContainsFinalizer(pdb, PDBFinalizer) { - // Remove PDBFinalizer. Once all finalizers have been - // removed, the object will be deleted. - log.Info("Removing finalizer") + if assertivePdbDeletion == true { + log.Info("Deleting pdb CRD: Assertive approach is turned on ") + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + log.Error(err, "Cannont find cdb resource ", "err", err.Error()) + return err + } + + pdbName := pdb.Spec.PDBName + if pdb.Status.OpenMode == "READ WRITE" { + valuesclose := map[string]string{ + "state": "CLOSE", + "modifyOption": "IMMEDIATE", + "getScript": "FALSE"} + url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" + _, errclose := r.callAPI(ctx, req, pdb, url, valuesclose, "POST") + if errclose != nil { + log.Info("Warning error closing pdb continue anyway") + } + } + + valuesdrop := map[string]string{ + "action": "INCLUDING", + "getScript": "FALSE"} + url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/" + + log.Info("Call Delete()") + _, errdelete := r.callAPI(ctx, req, pdb, url, valuesdrop, "DELETE") + if errdelete != nil { + log.Error(errdelete, "Fail to delete pdb :"+pdb.Name, "err", err.Error()) + return errdelete + } + } /* END OF ASSERTIVE SECTION */ + + log.Info("Marked to be deleted") + pdb.Status.Phase = pdbPhaseDelete + pdb.Status.Status = true + r.Status().Update(ctx, pdb) + controllerutil.RemoveFinalizer(pdb, PDBFinalizer) - err := r.Update(ctx, pdb) - if err != nil { - log.Info("Could not remove finalizer", "err", err.Error()) + if err := r.Update(ctx, pdb); err != nil { + log.Info("Cannot remove finalizer") return err } - log.Info("Successfully removed PDB resource") - return nil - } - } - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(pdb, PDBFinalizer) { - log.Info("Adding finalizer") - controllerutil.AddFinalizer(pdb, PDBFinalizer) - err := r.Update(ctx, pdb) - if err != nil { - log.Info("Could not add finalizer", "err", err.Error()) - return err } - pdb.Status.Status = false + + return nil } + return nil } diff --git a/controllers/database/shardingdatabase_controller.go b/controllers/database/shardingdatabase_controller.go index 88823afe..7fcaac2b 100644 --- a/controllers/database/shardingdatabase_controller.go +++ b/controllers/database/shardingdatabase_controller.go @@ -94,6 +94,9 @@ type ShardingDatabaseReconciler struct { Namespace string } +var sentFailMsg = make(map[string]bool) +var 
sentCompleteMsg = make(map[string]bool) + // +kubebuilder:rbac:groups=database.oracle.com,resources=shardingdatabases,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=database.oracle.com,resources=shardingdatabases/status,verbs=get;update;patch // +kubebuilder:rbac:groups=database.oracle.com,resources=shardingdatabases/finalizers,verbs=get;create;update;patch;delete @@ -166,6 +169,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req r.osh = append(r.osh, osh) } defer r.setCrdLifeCycleState(instance, &result, &err, &stateType) + defer r.updateShardTopologyStatus(instance) // =============================== Check Deletion TimeStamp======== // Check if the ProvOShard instance is marked to be deleted, which is // // indicated by the deletion timestamp being set. @@ -286,7 +290,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // if user set replicasize greater than 1 but also set instance.Spec.OraDbPvcName then only one service will be created and one pod for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex = instance.Spec.Shard[i] - if OraShardSpex.IsDelete != true { + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { result, err = r.createService(instance, shardingv1.BuildServiceDefForShard(instance, 0, OraShardSpex, "local")) if err != nil { result = resultNq @@ -306,7 +310,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req if len(instance.Spec.Shard) > 0 { for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex = instance.Spec.Shard[i] - if OraShardSpex.IsDelete != true { + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { result, err = r.deployStatefulSet(instance, shardingv1.BuildStatefulSetForShard(instance, OraShardSpex), "SHARD") if err != nil { result = resultNq @@ -327,6 +331,13 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req 
ctrl.Req return result, err } + err = r.checkShardState(instance) + if err != nil { + err = nilErr + result = resultQ + return result, err + } + //set the Waiting state for Reconcile loop // Loop will be requeued only if Shard Statefulset is not ready or not configured. // Till that time Reconcilation loop will remain in blocked state @@ -380,7 +391,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // ====================== Update Setup for Shard ============================== for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex = instance.Spec.Shard[i] - if OraShardSpex.IsDelete != true { + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { sfSet, shardPod, err := r.validateShard(instance, OraShardSpex, int(i)) if err != nil { shardingv1.LogMessages("INFO", "Shard "+sfSet.Name+" is not in available state.", nil, instance, r.Log) @@ -413,18 +424,6 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req } } - // Calling updateShardTopology to update the entire sharding topology - // This is required because we just executed updateShard,updateCatalog and UpdateGsm - // If some state has changed it will update the topology - - err = r.updateShardTopologyStatus(instance) - if err != nil { - // time.Sleep(30 * time.Second) - result = resultQ - err = nilErr - return result, err - } - stateType = string(databasev1alpha1.CrdReconcileCompeleteState) // r.setCrdLifeCycleState(instance, &result, &err, stateType) // Set error to ni to avoid reconcilation state reconcilation error as we are passing err to setCrdLifeCycleState @@ -889,7 +888,7 @@ func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.Sha if len(instance.Spec.Shard) > 0 { for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex := instance.Spec.Shard[i] - if OraShardSpex.IsDelete != true { + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { err = 
r.checkShardSpace(instance, OraShardSpex) if err != nil { return err @@ -902,32 +901,6 @@ func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.Sha } } - // Check Secret configuration - if instance.Spec.DbSecret == nil { - return fmt.Errorf("Secret specification cannot be null, you need to set secret details") - } else { - if len(instance.Spec.DbSecret.Name) == 0 { - return fmt.Errorf("instance.Spec.DbSecret.Name cannot be empty") - } - if len(instance.Spec.DbSecret.PwdFileName) == 0 { - return fmt.Errorf("instance.Spec.DbSecret.PwdFileName cannot be empty") - } - if strings.ToLower(instance.Spec.DbSecret.EncryptionType) != "base64" { - if strings.ToLower(instance.Spec.DbSecret.KeyFileName) == "" { - return fmt.Errorf("instance.Spec.DbSecret.KeyFileName cannot be empty") - } - } - if len(instance.Spec.DbSecret.PwdFileMountLocation) == 0 { - msg := "instance.Spec.DbSecret.PwdFileMountLocation is not set. Setting it to default " + shardingv1.GetSecretMount() - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - } - - if len(instance.Spec.DbSecret.KeyFileMountLocation) == 0 { - msg := "instance.Spec.DbSecret.KeyFileMountLocation is not set. 
Setting it to default " + shardingv1.GetSecretMount() - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - } - } - // Once the initial Spec is been validated then update the last Sucessful Spec err = instance.UpdateLastSuccessfulSpec(r.Client) if err != nil { @@ -1350,23 +1323,21 @@ func (r *ShardingDatabaseReconciler) validateShard(instance *databasev1alpha1.Sh } // This function updates the shard topology over all -func (r *ShardingDatabaseReconciler) updateShardTopologyStatus(instance *databasev1alpha1.ShardingDatabase) error { +func (r *ShardingDatabaseReconciler) updateShardTopologyStatus(instance *databasev1alpha1.ShardingDatabase) { //shardPod := &corev1.Pod{} //gsmSfSet := &appsv1.StatefulSet{} gsmPod := &corev1.Pod{} var err error _, _, err = r.validateCatalog(instance) if err != nil { - return err + } _, gsmPod, err = r.validateGsm(instance) if err != nil { - return err + } r.updateShardTopologyShardsInGsm(instance, gsmPod) - return nil - } func (r *ShardingDatabaseReconciler) updateShardTopologyShardsInGsm(instance *databasev1alpha1.ShardingDatabase, gsmPod *corev1.Pod) { @@ -1378,8 +1349,11 @@ func (r *ShardingDatabaseReconciler) updateShardTopologyShardsInGsm(instance *da if len(instance.Spec.Shard) > 0 { for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex := instance.Spec.Shard[i] + if strings.ToLower(OraShardSpex.IsDelete) == "failed" { + continue + } // stateStr := shardingv1.GetGsmShardStatus(instance, OraShardSpex.Name) - if OraShardSpex.IsDelete != true { + if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { shardSfSet, _, err = r.validateShard(instance, OraShardSpex, int(i)) if err != nil { continue @@ -1532,7 +1506,7 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 // stateStr := shardingv1.GetGsmShardStatus(instance, OraShardSpex.Name) // strings.Contains(stateStr, "DELETE") - if OraShardSpex.IsDelete != true { + if 
!shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { if setLifeCycleFlag != true { setLifeCycleFlag = true stateType := string(databasev1alpha1.CrdReconcileWaitingState) @@ -1561,33 +1535,36 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 continue } + /** // Copy file from pod to FS - // configrest, kclientset, err := shardingv1.GetPodCopyConfig(r.kubeClient, r.kubeConfig, instance, r.Log) - // if err != nil { - // return fmt.Errorf("Error occurred in getting KubeConfig, cannot perform copy operation from the pod") - // } - - // _, _, err = shardingv1.ExecCommand(gsmPod.Name, shardingv1.GetTdeKeyLocCmd(), r.kubeClient, r.kubeConfig, instance, r.Log) - // if err != nil { - // fmt.Printf("Error occurred during the while getting the TDE key from the pod " + gsmPod.Name) - // //return err - // } - // fileName := "/tmp/tde_key" - // last := fileName[strings.LastIndex(fileName, "/")+1:] - // fileName1 := last - // fsLoc := shardingv1.TmpLoc + "/" + fileName1 - // _, _, _, err = shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fmt.Sprintf("%s/%s:/%s", instance.Spec.Namespace, gsmPod.Name, fileName), fsLoc, "") - // if err != nil { - // fmt.Printf("failed to copy file") - // //return err - // } + configrest, kclientset, err := shardingv1.GetPodCopyConfig(r.kubeClient, r.kubeConfig, instance, r.Log) + if err != nil { + return fmt.Errorf("Error occurred in getting KubeConfig, cannot perform copy operation from the pod") + } + + _, _, err = shardingv1.ExecCommand(gsmPod.Name, shardingv1.GetTdeKeyLocCmd(), r.kubeClient, r.kubeConfig, instance, r.Log) + if err != nil { + fmt.Printf("Error occurred during the while getting the TDE key from the pod " + gsmPod.Name) + //return err + } + fileName := "/tmp/tde_key" + last := fileName[strings.LastIndex(fileName, "/")+1:] + fileName1 := last + fsLoc := shardingv1.TmpLoc + "/" + fileName1 + _, _, _, err = 
shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fmt.Sprintf("%s/%s:/%s", instance.Spec.Namespace, gsmPod.Name, fileName), fsLoc, "") + if err != nil { + fmt.Printf("failed to copy file") + //return err + } // Copying it to Shard Pod - // _, _, _, err = shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fsLoc, fmt.Sprintf("%s/%s:/%s", instance.Spec.Namespace, OraShardSpex.Name+"-0", fsLoc), "") - // if err != nil { - // fmt.Printf("failed to copy file") - // //return err - /// } + _, _, _, err = shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fsLoc, fmt.Sprintf("%s/%s:/%s", instance.Spec.Namespace, OraShardSpex.Name+"-0", fsLoc), "") + if err != nil { + fmt.Printf("failed to copy file") + //return err + } + + **/ // If the shard doesn't exist in GSM then just add the shard statefulset and update GSM shard status // ADD Shard in GSM @@ -1598,7 +1575,12 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.AddingShardErrorState)) title = "Shard Addition Failure" message = "Error occurred during shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " addition." - r.sendMessage(instance, title, message) + shardingv1.LogMessages("INFO", title+":"+message, nil, instance, r.Log) + if sentFailMsg[OraShardSpex.Name] != true { + r.sendMessage(instance, title, message) + } + sentFailMsg[OraShardSpex.Name] = true + sentCompleteMsg[OraShardSpex.Name] = false deployFlag = false } } @@ -1649,7 +1631,13 @@ func (r *ShardingDatabaseReconciler) verifyShards(instance *databasev1alpha1.Sha if oldStateStr != string(databasev1alpha1.ShardOnlineState) { title = "Shard Addition Completed" message = "Shard addition completed for shard " + shardingv1.GetFmtStr(shardSfSet.Name) + " in GSM." 
- r.sendMessage(instance, title, message) + shardingv1.LogMessages("INFO", title+":"+message, nil, instance, r.Log) + if sentCompleteMsg[shardSfSet.Name] != true { + r.sendMessage(instance, title, message) + } + + sentCompleteMsg[shardSfSet.Name] = true + sentFailMsg[shardSfSet.Name] = false } return nil } @@ -1680,7 +1668,7 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar if len(instance.Spec.Shard) > 0 { for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex := instance.Spec.Shard[i] - if OraShardSpex.IsDelete == true { + if shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { if setLifeCycleFlag != true { setLifeCycleFlag = true stateType := string(databasev1alpha1.CrdReconcileWaitingState) @@ -1736,6 +1724,13 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar title = "Chunk Movement Failure" message = "Error occurred during chunk movement in shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " deletion." 
r.sendMessage(instance, title, message) + instance.Spec.Shard[i].IsDelete = "failed" + err = shardingv1.InstanceShardPatch(instance, instance, r.Client, i, "isDelete", "failed") + if err != nil { + msg = "Error occurred while changing the isDelete value to failed in Spec struct" + shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) + return err + } continue } // 6th Step @@ -1743,13 +1738,26 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar // This is a loop and will check unless there is a error or chunks has moved // Validate if the chunks has moved before performing shard deletion for { + msg = "Sleeping for 120 seconds and will check status again of chunks movement in gsm for shard: " + shardingv1.GetFmtStr(OraShardSpex.Name) + "ShardType=" + strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) + shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) + time.Sleep(120 * time.Second) err = shardingv1.VerifyChunks(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) if err == nil { break } else { - msg = "Sleeping for 120 seconds and will check status again of chunks movement in gsm for shard: " + shardingv1.GetFmtStr(OraShardSpex.Name) - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - time.Sleep(120 * time.Second) + if strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) != "USER" { + // If ShardingType is not "USER", do not perform the patching.. 
continue + continue + } + instance.Spec.Shard[i].IsDelete = "failed" + err = shardingv1.InstanceShardPatch(instance, instance, r.Client, i, "isDelete", "failed") + if err != nil { + // r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.ChunkMoveError)) + msg = "Error occurred while changing the isDelete value to failed in Spec struct" + shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) + // return err + } + return err } } } @@ -1764,6 +1772,7 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar msg = "Error occurred during shard" + shardingv1.GetFmtStr(OraShardSpex.Name) + "removal from Gsm" shardingv1.LogMessages("Error", msg, nil, instance, r.Log) r.updateShardStatus(instance, int(i), string(databasev1alpha1.ShardRemoveError)) + instance.Spec.Shard[i].IsDelete = "failed" continue } @@ -1939,6 +1948,15 @@ func (r *ShardingDatabaseReconciler) deployStatefulSet(instance *databasev1alpha message := "Inside the deployStatefulSet function" shardingv1.LogMessages("DEBUG", message, nil, instance, r.Log) // See if StatefulSets already exists and create if it doesn't + // Error : invalid memory address or nil pointer dereference" (runtime error: invalid memory address or nil pointer dereference) + // This happens during unit test cases + for i := 0; i < 5; i++ { + if r.Scheme == nil { + time.Sleep(time.Second * 40) + } else { + break + } + } controllerutil.SetControllerReference(instance, dep, r.Scheme) found := &appsv1.StatefulSet{} err := r.Client.Get(context.TODO(), types.NamespacedName{ @@ -1974,3 +1992,58 @@ func (r *ShardingDatabaseReconciler) deployStatefulSet(instance *databasev1alpha return ctrl.Result{}, nil } + +func (r *ShardingDatabaseReconciler) checkShardState(instance *databasev1alpha1.ShardingDatabase) error { + + var i int32 + var err error = nil + var OraShardSpex databasev1alpha1.ShardSpec + var currState string + var eventMsg string + var msg string + + currState = "" + eventMsg = "" + + 
msg = "checkShardState():ShardType=" + strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) + shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) + if strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) != "USER" { + // ShardingType is not "USER", so return + return err + } + + if len(instance.Status.Gsm.Shards) > 0 { + for i = 0; i < int32(len(instance.Spec.Shard)); i++ { + OraShardSpex = instance.Spec.Shard[i] + currState = shardingv1.GetGsmShardStatus(instance, OraShardSpex.Name) + if currState == string(databasev1alpha1.AddingShardState) { + eventMsg = "Shard Addition in progress. Requeuing" + err = fmt.Errorf(eventMsg) + break + } else if currState == string(databasev1alpha1.DeletingState) { + eventMsg = "Shard Deletion in progress. Requeuing" + err = fmt.Errorf(eventMsg) + err = nil + break + } else if OraShardSpex.IsDelete == "failed" { + eventMsg = "Shard Deletion failed. Manual intervention required. Requeuing" + err = fmt.Errorf(eventMsg) + break + } else if currState == string(databasev1alpha1.DeleteErrorState) { + eventMsg = "Shard Deletion Error. Manual intervention required. Requeuing" + err = fmt.Errorf(eventMsg) + break + } else if currState == string(databasev1alpha1.ShardRemoveError) { + eventMsg = "Shard Deletion Error. Manual intervention required. 
Requeuing" + err = fmt.Errorf(eventMsg) + break + } else { + eventMsg = "checkShardState() : Shard State=[" + currState + "]" + shardingv1.LogMessages("INFO", eventMsg, nil, instance, r.Log) + err = nil + } + } + r.publishEvents(instance, eventMsg, currState) + } + return err +} diff --git a/controllers/database/singleinstancedatabase_controller.go b/controllers/database/singleinstancedatabase_controller.go index fa01ae7e..a20fa1fd 100644 --- a/controllers/database/singleinstancedatabase_controller.go +++ b/controllers/database/singleinstancedatabase_controller.go @@ -1096,6 +1096,38 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns } }(), + + Resources: func() corev1.ResourceRequirements { + if m.Spec.Resources.Requests != nil && m.Spec.Resources.Limits != nil { + return corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse(m.Spec.Resources.Requests.Cpu), + "memory": resource.MustParse(m.Spec.Resources.Requests.Memory), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse(m.Spec.Resources.Limits.Cpu), + "memory": resource.MustParse(m.Spec.Resources.Requests.Memory), + }, + } + } else if m.Spec.Resources.Requests != nil { + return corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse(m.Spec.Resources.Requests.Cpu), + "memory": resource.MustParse(m.Spec.Resources.Requests.Memory), + }, + } + } else if m.Spec.Resources.Limits != nil { + return corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.MustParse(m.Spec.Resources.Limits.Cpu), + "memory": resource.MustParse(m.Spec.Resources.Requests.Memory), + }, + } + } else { + return corev1.ResourceRequirements{} + } + + }(), }}, TerminationGracePeriodSeconds: func() *int64 { i := int64(30); return &i }(), diff --git a/controllers/observability/databaseobserver_resource.go b/controllers/observability/databaseobserver_resource.go index 75e05330..8c20ebe5 100644 --- 
a/controllers/observability/databaseobserver_resource.go +++ b/controllers/observability/databaseobserver_resource.go @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) @@ -100,8 +101,9 @@ func (resource *ObservabilityServiceResource) generate(api *apiv1.DatabaseObserv Selector: rSelector, Ports: []corev1.ServicePort{ { - Name: "metrics", - Port: rPort, + Name: "metrics", + Port: rPort, + TargetPort: intstr.FromInt32(constants.DefaultServiceTargetPort), }, }, }, diff --git a/docs/multitenant/README.md b/docs/multitenant/README.md index b20eaa4a..6c9a6756 100644 --- a/docs/multitenant/README.md +++ b/docs/multitenant/README.md @@ -1,31 +1,37 @@ +# Oracle Multitenant Database Controllers +The Oracle Database Operator for Kubernetes uses two controllers to manage [Pluggable Database life cycle][oradocpdb] -# Oracle Multitenant Database Controller +- CDB controller +- PDB controller -> WARNING: Examples with https are located in the use case directories +By using CDB/PDB controllers you can perform the following actions: **CREATE**, **MODIFY(OPEN/CLOSE)**, **DELETE**, **CLONE**, **PLUG** and **UNPLUG** -Detailed examples can be found here +This file explains how to set up CDB and PDB controllers; additional details can be found in the README files under the usecase directories.
-- [Usecase01](./usecase01) pdb crd and cdb pod are running in the same namesaoce -- [Usecase02](./usecase02) unplug and plug operation examples -- [Usecase03](./usecase03) multiple namespace example cdb pod ,pdb crd and pod operator are running in different namespaces +- [Usecase01][uc01] pdb crd and cdb pod are running in the same namespace +- [Usecase02][uc02] unplug and plug operation examples +- [Usecase03][uc03] multiple namespace example cdb pod, pdb crd and pod operator are running in different namespaces. +> **NOTE** that there is no controller for Container Database Operations -CDBs and PDBs are part of the Oracle Database [Multitenant Architecture](https://docs.oracle.com/en/database/oracle/oracle-database/21/multi/introduction-to-the-multitenant-architecture.html#GUID-AB84D6C9-4BBE-4D36-992F-2BB85739329F). The Multitenant Database Controller is a feature of Oracle DB Operator for Kubernetes (`OraOperator`), which helps to manage the lifecycle of Pluggable Databases (PDBs) in an Oracle Container Database (CDB). +## Macro steps for setup -The target CDB for which PDB lifecycle management is needed can be running on a machine on-premises. To manage the PDBs of that target CDB, you can run the Oracle DB Operator on a Kubernetes system on-premises (For Example: [Oracle Linux Cloud Native Environment or OLCNE](https://docs.oracle.com/en/operating-systems/olcne/)). +- Deploy the Oracle Database Operator +- Create ORDS-based image for CDB pod +- Container DB user creation +- Create secrets for credentials +- Create certificates for https connection +- Create CDB pod -NOTE: The target CDB can also run in a Cloud environment, such as an OCI [Oracle Base Database Service](https://docs.oracle.com/en-us/iaas/dbcs/doc/bare-metal-and-virtual-machine-db-systems.html)).
To manage PDBs on the target CDB, the Oracle DB Operator can run on a Kubernetes Cluster running in the cloud, such as OCI's [Container Engine for Kubernetes or OKE](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm#Overview_of_Container_Engine_for_Kubernetes)) - - - -# Oracle DB Operator Multitenant Database Controller Deployment +## Oracle DB Operator Multitenant Database Controller Deployment To deploy OraOperator, use this [Oracle Database Operator for Kubernetes](https://github.com/oracle/oracle-database-operator/blob/main/README.md) step-by-step procedure. After the Oracle Database Operator is deployed, you can see the DB Operator Pods running in the Kubernetes Cluster. As part of the `OraOperator` deployment, the multitenant Database Controller is deployed. You can see the CRDs (Custom Resource Definition) for the CDB and PDBs in the list of CRDs. The following output is an example of such a deployment: + ```bash [root@test-server oracle-database-operator]# kubectl get ns NAME STATUS AGE @@ -36,7 +42,6 @@ kube-public Active 245d kube-system Active 245d oracle-database-operator-system Active 24h <<<< namespace to deploy the Oracle Database Operator - [root@test-server oracle-database-operator]# kubectl get all -n oracle-database-operator-system NAME READY STATUS RESTARTS AGE pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 0 28s @@ -54,7 +59,6 @@ NAME DESIRED replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 29s [root@docker-test-server oracle-database-operator]# - [root@test-server oracle-database-operator]# kubectl get crd NAME CREATED AT autonomouscontainerdatabases.database.oracle.com 2022-06-22T01:21:36Z @@ -75,23 +79,18 @@ shardingdatabases.database.oracle.com 2022-06-22T01:21:39Z singleinstancedatabases.database.oracle.com 2022-06-22T01:21:40Z ``` -The following sections explain the setup and functionality of this controller. 
- -# Prerequsites to manage PDB Life Cycle using Oracle DB Operator Multitenant Database Controller +## Prerequsites to manage PDB Life Cycle using Oracle DB Operator Multitenant Database Controller -**CAUTION :** You must complete the following steps before managing the lifecycle of a PDB in a CDB using the Oracle DB Operator Multitenant Database Controller. - -* [Prepare CDB for PDB Lifecycle Management or PDB-LM](#prepare-cdb-for-pdb-lifecycle-management-pdb-lm) +* [Prepare the container database for PDB Lifecycle Management or PDB-LM](#prepare-cdb-for-pdb-lifecycle-management-pdb-lm) * [Oracle REST Data Service or ORDS Image](#oracle-rest-data-service-ords-image) * [Kubernetes Secrets](#kubernetes-secrets) * [Kubernetes CRD for CDB](#kubernetes-crd-for-cdb) * [Kubernetes CRD for PDB](#kubernetes-crd-for-pdb) +## Prepare the container database for PDB Lifecycle Management (PDB-LM) -+ ## Prepare CDB for PDB Lifecycle Management (PDB-LM) - -Pluggable Database (PDB) management operations are performed in the Container Database (CDB). These operations include create, clone, plug, unplug, delete, modify and map operations. +Pluggable Database (PDB) management operations are performed in the Container Database (CDB). These operations include create, clone, plug, unplug, delete, modify and map pdb. You cannot have an ORDS-enabled schema in the container database. 
To perform the PDB lifecycle management operations, you must first use the following steps to define the default CDB administrator credentials on target CDBs: @@ -117,29 +116,31 @@ col account_status for a30 select username, account_status from dba_users where username in ('ORDS_PUBLIC_USER','C##DBAPI_CDB_ADMIN','APEX_PUBLIC_USER','APEX_REST_PUBLIC_USER'); ``` -### Reference Setup: Example of a setup using OCI OKE(Kubernetes Cluster) and a CDB in Cloud (OCI Exadata Database Cluster) +## OCI OKE(Kubernetes Cluster) -See this [provisioning example setup](./provisioning/example_setup_using_oci_oke_cluster.md) for steps to configure a Kubernetes Cluster and a CDB. This example uses an OCI OKE Cluster as the Kubernetes Cluster and a CDB in OCI Exadata Database service. +You can use an [OKE in Oracle Cloud Infrastructure][okelink] to configure the operator for PDB lifecycle management. **Note that there is no restriction about container database location; it can be anywhere (on cloud or premises , on any supported platform).** +To quickly create an OKE cluster in your OCI cloud environment you can use the following [link](./provisioning/quickOKEcreation.md). +In this setup example [provisioning example setup](./provisioning/example_setup_using_oci_oke_cluster.md), the Container database is running on a OCI Exadata Database Cluster. -+ ## Oracle REST Data Service (ORDS) Image + +## Oracle REST Data Service (ORDS) Image - Oracle DB Operator Multitenant Database controller requires that the Oracle REST Data Services (ORDS) image for PDB Lifecycle Management is present in the target CDB. - - You can build this image by using the ORDS [Dockerfile](../../../ords/Dockerfile) + The PDB Database controllers require a pod running a dedicated rest server image based on [ORDS][ordsdoc]. Read the following [link](./provisioning/ords_image.md) to build the ords images. 
- For the steps to build the ORDS Docker image, see [ORDS_image](./provisioning/ords_image.md) +## Kubernetes Secrets + Multitenant Controllers use Kubernetes Secrets to store the required credential. The https certificates are stored in Kubernetes Secrets as well. -+ ## Kubernetes Secrets + **Note** In multi namespace enviroment you have to create specific secrets for each namespaces - Oracle DB Operator Multitenant Database Controller uses Kubernetes Secrets to store usernames and passwords that you must have to manage the lifecycle operations of a PDB in the target CDB. In addition, to use https protocol, all certificates need to be stored using Kubernetes Secret. + **Note** In multi namespace enviroment you have to create specific secrets for each namespaces **Note** In multi namespace enviroment you have to create specific secrets for each namespaces ### Secrets for CDB CRD - Create a secret file as shown here: [config/samples/multitenant/cdb_secret.yaml](../../config/samples/multitenant/cdb_secret.yaml). Modify this file with the `base64` encoded values of the required passwords for CDB, and use this file to create the required secrets. + Create a secret file as shown here: [config/samples/multitenant/cdb_secret.yaml](../multitenant/provisioning/singlenamespace/cdb_create.yaml). Modify this file with the `base64` encoded values of the required passwords for CDB, and use this file to create the required secrets. ```bash kubectl apply -f cdb_secret.yaml @@ -150,17 +151,16 @@ See this [provisioning example setup](./provisioning/example_setup_using_oci_oke ```bash echo -n "" | base64 ``` - The value that is returned is the base64-encoded value for that password string. **Note:** After successful creation of the CDB Resource, the CDB secrets are deleted from the Kubernetes system . ### Secrets for PDB CRD - Create a secret file as shown here: [config/samples/multitenant/pdb_secret.yaml](../../config/samples/multitenant/pdb_secret.yaml). 
Modify this file with the `base64` encoded values of the required passwords for PDB and use it to create the required secrets. + + Create a secret file as shown here: [pdb_secret.yaml](../multitenant/provisioning/singlenamespace/pdb_secret.yaml). Edit the file using your base64 credential and apply it. ```bash kubectl apply -f pdb_secret.yaml ``` - **NOTE:** To encode the password using `base64`, see the command example in the preceding **Secrets for CDB CRD** section. **NOTE:** Don't leave plaintext files containing sensitive data on disk. After loading the Secret, remove the plaintext file or move it to secure storage. @@ -182,43 +182,73 @@ kubectl create secret generic db-ca --from-file=ca.crt -n oracle-database-operat ``` image_not_found + **Note:** On successful creation of the certificates secret creation remove files or move to secure storage . -+ ## Kubernetes CRD for CDB +## Kubernetes CRD for CDB -The Oracle Database Operator Multitenant Controller creates the CDB kind as a custom resource that models a target CDB as a native Kubernetes object. This kind is used only to create Pods to connect to the target CDB to perform PDB-LM operations. These CDB resources can be scaled, based on the expected load, using replicas. Each CDB resource follows the CDB CRD as defined here: [config/crd/bases/database.oracle.com_cdbs.yaml](../../config/crd/bases/database.oracle.com_cdbs.yaml) +The Oracle Database Operator Multitenant Controller creates the CDB kind as a custom resource that models a target CDB as a native Kubernetes object. This kind is used only to create Pods to connect to the target CDB to perform PDB-LM operations. 
Each CDB resource follows the CDB CRD as defined here: [config/crd/bases/database.oracle.com_cdbs.yaml](../../config/crd/bases/database.oracle.com_cdbs.yaml) -To create a CDB CRD, see this example `.yaml` file: [config/samples/multitenant/cdb.yaml](../../config/samples/multitenant/cdb.yaml) +To create a CDB CRD, see this example `.yaml` file: [cdb_create.yaml](../multitenant/provisioning/singlenamespace/cdb_create.yaml) **Note:** The password and username fields in this *cdb.yaml* Yaml are the Kubernetes Secrets created earlier in this procedure. For more information, see the section [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/). To understand more about creating secrets for pulling images from a Docker private registry, see [Kubernetes Private Registry Documenation]( https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). -1. [Use Case: Create a CDB CRD Resource](./provisioning/cdb_crd_resource.md) -2. [Use Case: Add another replica to an existing CDB CRD Resource](./provisioning/add_replica.md) +Create a CDB CRD Resource example +```bash +kubectl apply -f cdb_create.yaml +``` -+ ## Kubernetes CRD for PDB +see [usecase01][uc01] and usecase03[uc03] for more information about file configuration -The Oracle Database Operator Multitenant Controller creates the PDB kind as a custom resource that models a PDB as a native Kubernetes object. There is a one-to-one mapping between the actual PDB and the Kubernetes PDB Custom Resource. You cannot have more than one Kubernetes resource for a target PDB. This PDB resource can be used to perform PDB-LM operations by specifying the action attribute in the PDB Specs. 
Each PDB resource follows the PDB CRD as defined here: [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) +## Kubernetes CRD for PDB -The Oracle Database Operator Multitenant Controller creates the PDB kind as a custom resource that models a PDB as a native Kubernetes object. There is a one-to-one mapping between the actual PDB and the Kubernetes PDB Custom Resource. You cannot have more than one Kubernetes resource for a target PDB. This PDB resource can be used to perform PDB-LM operations by specifying the action attribute in the PDB Specs. Each PDB resource follows the PDB CRD as defined here: [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) -To create a PDB CRD Resource, a sample .yaml file is available here: [config/samples/multitenant/pdb_create.yaml](../../config/samples/multitenant/pdb_create.yaml) +To create a PDB CRD Resource, a sample .yaml file is available here: [pdb_create.yaml](../multitenant/provisioning/singlenamespace/pdb_create.yaml) -# Use Cases for PDB Lifecycle Management Operations using Oracle DB Operator Multitenant Controller +```bash +kubectl apply -f pdb_create.yaml +``` -Using the Oracle DB Operator Multitenant Controller, you can perform the following PDB-LM operations: CREATE, CLONE, MODIFY, DELETE, UNPLUG, PLUG. +## Usecases files list ### Single Namespace -## Validation and Errors +1. [Create CDB](./provisioning/singlenamespace/cdb_create.yaml) +2. [Create PDB](./provisioning/singlenamespace/pdb_create.yaml) +3. [Clone PDB](./provisioning/singlenamespace/pdb_clone.yaml) +4. [Open PDB](./provisioning/singlenamespace/pdb_open.yaml) +4. 
[Close PDB](./provisioning/singlenamespace/pdb_close.yaml) +5. [Delete PDB](./provisioning/singlenamespace/pdb_delete.yaml) +6. [Unplug PDB](./provisioning/singlenamespace/pdb_unplug.yaml) +7. [Plug PDB](./provisioning/singlenamespace/pdb_plug.yaml) -To see how to look for any validation errors, see [validation_error](./provisioning/validation_error.md). +### Multiple namespace (cdbnamespace,dbnamespace) +1. [Create CDB](./provisioning/multinamespace/cdb_create.yaml) +2. [Create PDB](./provisioning/multinamespace/pdb_create.yaml) +3. [Clone PDB](./provisioning/multinamespace/pdb_clone.yaml) +4. [Open PDB](./provisioning/multinamespace/pdb_open.yaml) +4. [Close PDB](./provisioning/multinamespace/pdb_close.yaml) +5. [Delete PDB](./provisioning/multinamespace/pdb_delete.yaml) +6. [Unplug PDB](./provisioning/multinamespace/pdb_unplug.yaml) ## Known issues -To find out about known issue related to Oracle DB Operator Multitenant Controller, see [known_issues](./provisioning/known_issues.md). + - Ords installation failure if pluggable databases in the container db are not opened + + - Version 1.1.0: encoded password for https authentication may include a carriage return; as a consequence the https request fails with http 404 error. W/A generate encoded password using **printf** instead of **echo**. + + - pdb controller authentication suddenly fails without any system change. Check the certificate expiration date **openssl .... 
-days 365** + + - Nothing happens after applying the cdb yaml file: Make sure to have properly configured the WATCH_NAMESPACE list in the operator yaml file + + [okelink]:https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm + [ordsdoc]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/23.1/index.html + [uc01]:../multitenant/usecase01/README.md + [uc02]:../multitenant/usecase02/README.md + [uc03]:../multitenant/usecase03/README.md + [oradocpdb]:https://docs.oracle.com/en/database/oracle/oracle-database/21/multi/introduction-to-the-multitenant-architecture.html#GUID-AB84D6C9-4BBE-4D36-992F-2BB85739329F + + \ No newline at end of file diff --git a/docs/multitenant/provisioning/add_replica.log b/docs/multitenant/provisioning/add_replica.log deleted file mode 100644 index 53971443..00000000 --- a/docs/multitenant/provisioning/add_replica.log +++ /dev/null @@ -1,192 +0,0 @@ --- Check the status of CDB CRD Pod(s): - -% kubectl get all -n oracle-database-operator-system -NAME READY STATUS RESTARTS AGE -pod/cdb-dev-ords-rs-q2b68 1/1 Running 0 29s -pod/oracle-database-operator-controller-manager-76cb674c5c-4nrh8 1/1 Running 0 4d10h -pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd 1/1 Running 1 4d10h -pod/oracle-database-operator-controller-manager-76cb674c5c-xsv9g 1/1 Running 2 4d10h - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.98.47 8443/TCP 5d1h -service/oracle-database-operator-webhook-service ClusterIP 10.96.166.163 443/TCP 5d1h - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 5d1h - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cdb-dev-ords-rs 1 1 1 31s -replicaset.apps/oracle-database-operator-controller-manager-76cb674c5c 3 3 3 5d1h - - --- .yaml file for the add replica use case: - -% cat add_replica.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. 
All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: CDB -metadata: - name: cdb-dev - namespace: oracle-database-operator-system -spec: - cdbName: "goldcdb" - scanName: "goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com" - dbServer: "goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com" - ordsImage: phx.ocir.io//oracle/ords:21.4.3 - dbPort: 1521 - replicas: 2 - serviceName: "goldcdb_phx1pw.lbsub52b3b1cae.okecluster.oraclevcn.com" - sysAdminPwd: - secret: - secretName: "cdb1-secret" - key: "sysadmin_pwd" - ordsPwd: - secret: - secretName: "cdb1-secret" - key: "ords_pwd" - cdbAdminUser: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_user" - cdbAdminPwd: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_pwd" - webServerUser: - secret: - secretName: "cdb1-secret" - key: "webserver_user" - webServerPwd: - secret: - secretName: "cdb1-secret" - key: "webserver_pwd" - - - - --- Apply the .yaml file: - -% kubectl apply -f add_replica.yaml -cdb.database.oracle.com/cdb-dev configured - - - --- Check the status of the CDB CRD Pod(s): - -% kubectl get all -n oracle-database-operator-system -NAME READY STATUS RESTARTS AGE -pod/cdb-dev-ords-rs-5bztb 1/1 Running 0 21s << New Pod Added -pod/cdb-dev-ords-rs-q2b68 1/1 Running 0 7m40s -pod/oracle-database-operator-controller-manager-76cb674c5c-4nrh8 1/1 Running 0 4d10h -pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd 1/1 Running 1 4d10h -pod/oracle-database-operator-controller-manager-76cb674c5c-xsv9g 1/1 Running 2 4d10h - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/cdb-dev-ords ClusterIP None 6m25s -service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.98.47 8443/TCP 5d2h -service/oracle-database-operator-webhook-service ClusterIP 10.96.166.163 443/TCP 5d2h - -NAME READY UP-TO-DATE AVAILABLE AGE 
-deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 5d2h - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cdb-dev-ords-rs 2 2 2 7m42s -replicaset.apps/oracle-database-operator-controller-manager-76cb674c5c 3 3 3 5d2h - - - - - --- Logs from Oracle DB Operator Pod: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. -2022-06-27T03:24:34Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-cdb", "UID": "19a3fbb6-57e4-4ad2-92c9-a90bb66cefae", "kind": "database.oracle.com/v1alpha1, Kind=CDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"cdbs"}} -2022-06-27T03:24:34Z INFO cdb-webhook validate update {"name": "cdb-dev"} -2022-06-27T03:24:34Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-cdb", "code": 200, "reason": "", "UID": "19a3fbb6-57e4-4ad2-92c9-a90bb66cefae", "allowed": true} -2022-06-27T03:24:34Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:34Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "Ready", "Status": "true"} -2022-06-27T03:24:34Z INFO controllers.CDB Existing Replicas: 1, New Replicas: 2 {"evaluateSpecChange": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:34Z INFO controllers.CDB Current Phase:ValidatingPods {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:24:34Z INFO controllers.CDB Validating Pod creation for :cdb-dev {"validateORDSPod": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:34Z INFO controller.cdb Executing Command : {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": 
"oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:34Z INFO controller.cdb bash -c curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ || curl -sSkv -X GET http://localhost:8888/ords/_/db-api/stable/metadata-catalog/ {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:35Z INFO controllers.CDB Replicas: 2 {"validateORDSPod": "oracle-database-operator-system/cdb-dev", "Ready Pods: ": 1} -2022-06-27T03:24:35Z INFO controllers.CDB Reconcile queued {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:35Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:24:50Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:50Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:24:50Z INFO controllers.CDB Current Phase:ValidatingPods {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:24:50Z INFO controllers.CDB Validating Pod creation for :cdb-dev {"validateORDSPod": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:50Z INFO controller.cdb Executing Command : {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:50Z INFO controller.cdb bash -c curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ || curl -sSkv -X GET http://localhost:8888/ords/_/db-api/stable/metadata-catalog/ {"reconciler group": 
"database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:50Z INFO controller.cdb Executing Command : {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:50Z INFO controller.cdb bash -c curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ || curl -sSkv -X GET http://localhost:8888/ords/_/db-api/stable/metadata-catalog/ {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:50Z INFO controllers.CDB Replicas: 2 {"validateORDSPod": "oracle-database-operator-system/cdb-dev", "Ready Pods: ": 1} -2022-06-27T03:24:50Z INFO controllers.CDB Reconcile queued {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:24:50Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:25:05Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:25:05Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:25:05Z INFO controllers.CDB Current Phase:ValidatingPods {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:25:05Z INFO controllers.CDB Validating Pod creation for :cdb-dev {"validateORDSPod": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:25:05Z INFO controller.cdb Executing Command : {"reconciler group": "database.oracle.com", 
"reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:25:05Z INFO controller.cdb bash -c curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ || curl -sSkv -X GET http://localhost:8888/ords/_/db-api/stable/metadata-catalog/ {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:25:05Z INFO controller.cdb Executing Command : {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:25:05Z INFO controller.cdb bash -c curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ || curl -sSkv -X GET http://localhost:8888/ords/_/db-api/stable/metadata-catalog/ {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:25:06Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "CreatingService", "Status": "false"} -2022-06-27T03:25:21Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:25:21Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "CreatingService", "Status": "false"} -2022-06-27T03:25:21Z INFO controllers.CDB Current Phase:CreatingService {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:25:21Z INFO controllers.CDB ORDS Cluster Service already exists {"createORDSSVC": 
"oracle-database-operator-system/cdb-dev"} -2022-06-27T03:25:21Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "Ready", "Status": "false"} -2022-06-27T03:25:36Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:25:36Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "Ready", "Status": "false"} -2022-06-27T03:25:36Z INFO controllers.CDB Current Phase:Ready {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:25:36Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "Ready", "Status": "true"} - - - --- Logs of the newly added CDB CRD Pod: - -% kubectl logs -f pod/cdb-dev-ords-rs-5bztb -n oracle-database-operator-system - -Retrieving information. -Requires to login with administrator privileges to verify Oracle REST Data Services schema. - -Connecting to database user: SYS AS SYSDBA url: jdbc:oracle:thin:@//goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/goldcdb_phx1pw.lbsub52b3b1cae.okecluster.oraclevcn.com - -Retrieving information.. -Completed verifying Oracle REST Data Services schema version 21.4.3.r1170405. -2022-06-27T03:24:40.351Z INFO reloaded pools: [] -2022-06-27T03:24:40.353Z INFO Oracle REST Data Services schema version 21.4.3.r1170405 is installed. 
-spawn java -jar /opt/oracle/ords/ords.war user sql_admin SQL Administrator -Enter a password for user sql_admin: -Confirm password for user sql_admin: -2022-06-27T03:24:42.034Z INFO Created user: sql_admin in file: /opt/oracle/ords/config/ords/credentials -2022-06-27T03:24:43.666Z INFO Modified: /opt/oracle/ords/config/ords/conf/apex_pu.xml, updated properties: database.api.admin.enabled, db.cdb.adminUser, db.cdb.adminUser.password -2022-06-27T03:24:45.455Z INFO HTTP and HTTP/2 cleartext listening on host: localhost port: 8888 -2022-06-27T03:24:45.520Z INFO The document root is serving static resources located in: /opt/oracle/ords/doc_root -2022-06-27T03:24:47.515Z INFO Configuration properties for: |apex|pu| -db.servicename=goldcdb_phx1pw.lbsub52b3b1cae.okecluster.oraclevcn.com -db.hostname=goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com -database.api.admin.enabled=true -db.password=****** -db.cdb.adminUser.password=****** -database.api.enabled=true -db.cdb.adminUser=C##DBAPI_CDB_ADMIN as SYSDBA -db.username=ORDS_PUBLIC_USER -restEnabledSql.active=true -resource.templates.enabled=true -db.port=1521 -feature.sdw=true -db.connectionType=basic - -2022-06-27T03:24:47.517Z WARNING *** jdbc.MaxLimit in configuration |apex|pu| is using a value of 10, this setting may not be sized adequately for a production environment *** -2022-06-27T03:24:47.517Z WARNING *** jdbc.InitialLimit in configuration |apex|pu| is using a value of 3, this setting may not be sized adequately for a production environment *** -2022-06-27T03:24:51.761Z INFO Oracle REST Data Services initialized -Oracle REST Data Services version : 21.4.3.r1170405 -Oracle REST Data Services server info: jetty/9.4.44.v20210927 diff --git a/docs/multitenant/provisioning/add_replica.md b/docs/multitenant/provisioning/add_replica.md deleted file mode 100644 index 1315dc9f..00000000 --- a/docs/multitenant/provisioning/add_replica.md +++ /dev/null @@ -1,36 +0,0 @@ -# Add a new replicate to an existing CDB CRD Resource 
using Oracle DB Operator On-Prem Controller - -In this use case, using the Oracle Database Operator On-Prem Controller, you will add a new replica to an existing CDB CRD resource. - -**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. - -This example uses `add_replica.yaml` with: - -- CDB CRD resource Name as `cdb-dev` -- Container Database (CDB) Name as `goldcdb` -- Scan Name as `goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com` -- Database Server Name as `goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com` -- ORDS Docker Image as `phx.ocir.io//oracle/ords:21.4.3` -- Database Listener Port as `1521` -- Number of replicas for CDB CRD Resource as 2. - -**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [config/crd/bases/database.oracle.com_cdbs.yaml](../../../config/crd/bases/database.oracle.com_cdbs.yaml) - -Use the file: [add_replica.yaml](./add_replica.yaml) for this use case as below: - -1. Deploy the .yaml file: -```sh -[root@test-server oracle-database-operator]# kubectl apply -f add_replica.yaml -``` - -2. Monitor the Oracle DB Operator Pod for the progress of the CDB CRD Resource creation. - -NOTE: Check the DB Operator Pod name in your environment. 
- -``` -[root@test-server oracle-database-operator]# kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -``` - -## Sample Output - -[Here](./add_replica.log) is the sample output for a PDB created using Oracle DB Operator On-Prem Controller using file [add_replica.yaml](./add_replica.yaml) diff --git a/docs/multitenant/provisioning/add_replica.yaml b/docs/multitenant/provisioning/add_replica.yaml deleted file mode 100644 index fac2d7ba..00000000 --- a/docs/multitenant/provisioning/add_replica.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: CDB -metadata: - name: cdb-dev - namespace: oracle-database-operator-system -spec: - cdbName: "goldcdb" - dbServer: "goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com" - ordsImage: phx.ocir.io//oracle/ords:21.4.3 - dbPort: 1521 - replicas: 2 - serviceName: "goldcdb_phx1pw.lbsub52b3b1cae.okecluster.oraclevcn.com" - sysAdminPwd: - secret: - secretName: "cdb1-secret" - key: "sysadmin_pwd" - ordsPwd: - secret: - secretName: "cdb1-secret" - key: "ords_pwd" - cdbAdminUser: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_user" - cdbAdminPwd: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_pwd" - webServerUser: - secret: - secretName: "cdb1-secret" - key: "webserver_user" - webServerPwd: - secret: - secretName: "cdb1-secret" - key: "webserver_pwd" diff --git a/docs/multitenant/provisioning/cdb.log b/docs/multitenant/provisioning/cdb.log deleted file mode 100644 index 8c3cbdc5..00000000 --- a/docs/multitenant/provisioning/cdb.log +++ /dev/null @@ -1,279 +0,0 @@ --- Check the status of the Oracle DB Operator Pods: - -% kubectl get all -n oracle-database-operator-system -NAME READY STATUS RESTARTS AGE 
-pod/oracle-database-operator-controller-manager-76cb674c5c-4nrh8 1/1 Running 0 29h -pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd 1/1 Running 1 29h -pod/oracle-database-operator-controller-manager-76cb674c5c-xsv9g 1/1 Running 2 29h - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.98.47 8443/TCP 45h -service/oracle-database-operator-webhook-service ClusterIP 10.96.166.163 443/TCP 45h - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 45h - -NAME DESIRED CURRENT READY AGE -replicaset.apps/oracle-database-operator-controller-manager-76cb674c5c 3 3 3 45h - - - --- Get the base64 values for the required passwords as below: - -% echo -n "WElcome_21##" | base64 -V0VsY29tZV8yMSMj - -% echo -n "C##DBAPI_CDB_ADMIN" | base64 -QyMjREJBUElfQ0RCX0FETUlO - -% echo -n "sql_admin" | base64 -c3FsX2FkbWlu - -% echo -n "welcome1" | base64 -d2VsY29tZTE= - - --- Add the base64 encoded values against the required variables in cdb_secret.yaml file for this use case: - -% cat cdb_secret.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: v1 -kind: Secret -metadata: - name: cdb1-secret - namespace: oracle-database-operator-system -type: Opaque -data: - ords_pwd: "V0VsY29tZV8yMSMj" - sysadmin_pwd: "V0VsY29tZV8yMSMj" - cdbadmin_user: "QyMjREJBUElfQ0RCX0FETUlO" - cdbadmin_pwd: "V0VsY29tZV8yMSMj" - webserver_user: "c3FsX2FkbWlu" - webserver_pwd: "d2VsY29tZTE=" - - --- Check the contents of the cdb.yaml file: -% cat cdb.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: CDB -metadata: - name: cdb-dev - namespace: oracle-database-operator-system -spec: - cdbName: "goldcdb" - scanName: "goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com" - dbServer: "goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com" - ordsImage: phx.ocir.io//oracle/ords:21.4.3 - ordsImagePullSecret: "container-registry-secret" - dbPort: 1521 - replicas: 1 - serviceName: "goldcdb_phx1pw.lbsub52b3b1cae.okecluster.oraclevcn.com" - sysAdminPwd: - secret: - secretName: "cdb1-secret" - key: "sysadmin_pwd" - ordsPwd: - secret: - secretName: "cdb1-secret" - key: "ords_pwd" - cdbAdminUser: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_user" - cdbAdminPwd: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_pwd" - webServerUser: - secret: - secretName: "cdb1-secret" - key: "webserver_user" - webServerPwd: - secret: - secretName: "cdb1-secret" - key: "webserver_pwd" - - - --- Apply the .yaml files: - -% kubectl apply -f cdb_secret.yaml -secret/cdb1-secret created - -% kubectl apply -f cdb.yaml -cdb.database.oracle.com/cdb-dev created - - - --- Monitor the Oracle DB Operator Pod during the period when the CDB CRD is getting deployed: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. 
-2022-06-27T03:16:44Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:16:44Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "", "Status": "false"} -2022-06-27T03:16:44Z INFO controllers.CDB Adding finalizer {"manageCDBDeletion": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:16:44Z INFO controllers.CDB Current Phase: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:16:44Z INFO controllers.CDB DEFAULT: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "", "Status": "false"} -2022-06-27T03:16:44Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "Initializing", "Status": "false"} -2022-06-27T03:17:00Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:00Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "Initializing", "Status": "false"} -2022-06-27T03:17:00Z INFO controllers.CDB Current Phase:Initializing {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:17:00Z INFO controllers.CDB Verified secrets successfully {"verifySecrets": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:00Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "CreatingPod", "Status": "false"} -2022-06-27T03:17:15Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:15Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "CreatingPod", "Status": "false"} 
-2022-06-27T03:17:15Z INFO controllers.CDB Current Phase:CreatingPod {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:17:15Z INFO controllers.CDB Creating ORDS Replicaset: cdb-dev-ords-rs {"createORDSInstances": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:15Z INFO controllers.CDB Created ORDS ReplicaSet successfully {"createORDSInstances": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:15Z DEBUG events Normal {"object": {"kind":"CDB","namespace":"oracle-database-operator-system","name":"cdb-dev","uid":"c36e8d5f-6103-4a70-a840-16c6683755ec","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101443216"}, "reason": "CreatedORDSReplicaSet", "message": "Created ORDS Replicaset (Replicas - 1) for cdb-dev"} -2022-06-27T03:17:15Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:17:30Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:30Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:17:30Z INFO controllers.CDB Current Phase:ValidatingPods {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:17:30Z INFO controllers.CDB Validating Pod creation for :cdb-dev {"validateORDSPod": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:30Z INFO controller.cdb Executing Command : {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:30Z INFO controller.cdb bash -c curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ || curl -sSkv -X GET 
http://localhost:8888/ords/_/db-api/stable/metadata-catalog/ {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:30Z INFO controllers.CDB Replicas: 1 {"validateORDSPod": "oracle-database-operator-system/cdb-dev", "Ready Pods: ": 0} -2022-06-27T03:17:30Z INFO controllers.CDB Reconcile queued {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:30Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:17:45Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:45Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:17:45Z INFO controllers.CDB Current Phase:ValidatingPods {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:17:45Z INFO controllers.CDB Validating Pod creation for :cdb-dev {"validateORDSPod": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:45Z INFO controller.cdb Executing Command : {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:45Z INFO controller.cdb bash -c curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ || curl -sSkv -X GET http://localhost:8888/ords/_/db-api/stable/metadata-catalog/ {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:45Z INFO 
controllers.CDB Replicas: 1 {"validateORDSPod": "oracle-database-operator-system/cdb-dev", "Ready Pods: ": 0} -2022-06-27T03:17:45Z INFO controllers.CDB Reconcile queued {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:17:45Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:18:00Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:00Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:18:00Z INFO controllers.CDB Current Phase:ValidatingPods {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:18:00Z INFO controllers.CDB Validating Pod creation for :cdb-dev {"validateORDSPod": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:00Z INFO controller.cdb Executing Command : {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:00Z INFO controller.cdb bash -c curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ || curl -sSkv -X GET http://localhost:8888/ords/_/db-api/stable/metadata-catalog/ {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:01Z INFO controllers.CDB Replicas: 1 {"validateORDSPod": "oracle-database-operator-system/cdb-dev", "Ready Pods: ": 0} -2022-06-27T03:18:01Z INFO controllers.CDB Reconcile queued {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:01Z INFO controllers.CDB DEFER 
{"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:18:16Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:16Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "ValidatingPods", "Status": "false"} -2022-06-27T03:18:16Z INFO controllers.CDB Current Phase:ValidatingPods {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:18:16Z INFO controllers.CDB Validating Pod creation for :cdb-dev {"validateORDSPod": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:16Z INFO controller.cdb Executing Command : {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:16Z INFO controller.cdb bash -c curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ || curl -sSkv -X GET http://localhost:8888/ords/_/db-api/stable/metadata-catalog/ {"reconciler group": "database.oracle.com", "reconciler kind": "CDB", "name": "cdb-dev", "namespace": "oracle-database-operator-system", "ExecCommand": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:16Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "CreatingService", "Status": "false"} -2022-06-27T03:18:31Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:31Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "CreatingService", "Status": "false"} -2022-06-27T03:18:31Z INFO controllers.CDB Current Phase:CreatingService {"onpremdboperator": 
"oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:18:31Z INFO controllers.CDB Creating a new Cluster Service for: cdb-dev {"createORDSSVC": "oracle-database-operator-system/cdb-dev", "Svc.Namespace": "oracle-database-operator-system", "Service.Name": "cdb-dev-ords"} -2022-06-27T03:18:31Z INFO controllers.CDB Created ORDS Cluster Service successfully {"createORDSSVC": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:31Z DEBUG events Normal {"object": {"kind":"CDB","namespace":"oracle-database-operator-system","name":"cdb-dev","uid":"c36e8d5f-6103-4a70-a840-16c6683755ec","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101443627"}, "reason": "CreatedORDSService", "message": "Created ORDS Service for cdb-dev"} -2022-06-27T03:18:31Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "Ready", "Status": "false"} -2022-06-27T03:18:46Z INFO controllers.CDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/cdb-dev"} -2022-06-27T03:18:46Z INFO controllers.CDB Res Status: {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "Ready", "Status": "false"} -2022-06-27T03:18:46Z INFO controllers.CDB Current Phase:Ready {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev"} -2022-06-27T03:18:46Z INFO controllers.CDB DEFER {"onpremdboperator": "oracle-database-operator-system/cdb-dev", "Name": "cdb-dev", "Phase": "Ready", "Status": "true"} - - - - --- Check the status of the CDB CRD Pod after few minutes of running above commands: - -% kubectl get all -n oracle-database-operator-system -NAME READY STATUS RESTARTS AGE -pod/cdb-dev-ords-rs-q2b68 1/1 Running 0 29s <<< CDB CRD Resource Pod -pod/oracle-database-operator-controller-manager-76cb674c5c-4nrh8 1/1 Running 0 4d10h -pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd 1/1 Running 1 4d10h 
-pod/oracle-database-operator-controller-manager-76cb674c5c-xsv9g 1/1 Running 2 4d10h - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.98.47 8443/TCP 5d1h -service/oracle-database-operator-webhook-service ClusterIP 10.96.166.163 443/TCP 5d1h - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 5d1h - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cdb-dev-ords-rs 1 1 1 31s -replicaset.apps/oracle-database-operator-controller-manager-76cb674c5c 3 3 3 5d1h - - - - --- Check the logs of the CDB CRD Pod created above: - -% kubectl logs -f pod/cdb-dev-ords-rs-q2b68 -n oracle-database-operator-system -Requires to login with administrator privileges to verify Oracle REST Data Services schema. - -Connecting to database user: SYS AS SYSDBA url: jdbc:oracle:thin:@//goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/goldcdb_phx1pw.lbsub52b3b1cae.okecluster.oraclevcn.com - -Retrieving information.. -Your database connection is to a CDB. ORDS common user ORDS_PUBLIC_USER will be created in the CDB. ORDS schema will be installed in the PDBs. -Root CDB$ROOT - create ORDS common user -PDB PDB$SEED - install ORDS 21.4.3.r1170405 - -2022-06-27T03:17:22.015Z INFO reloaded pools: [] -2022-06-27T03:17:22.024Z INFO - -2022-06-27T03:17:23.200Z INFO Installing Oracle REST Data Services version 21.4.3.r1170405 in CDB$ROOT -2022-06-27T03:17:23.234Z INFO ... Log file written to /home/oracle/ords_cdb_install_core_CDB_ROOT_2022-06-27_031723_00234.log -2022-06-27T03:17:24.352Z INFO ... Verified database prerequisites -2022-06-27T03:17:24.662Z INFO ... Created Oracle REST Data Services proxy user -2022-06-27T03:17:24.708Z INFO Completed installation for Oracle REST Data Services version 21.4.3.r1170405. 
Elapsed time: 00:00:01.504 - -2022-06-27T03:17:24.722Z INFO Installing Oracle REST Data Services version 21.4.3.r1170405 in PDB$SEED -2022-06-27T03:17:24.731Z INFO ... Log file written to /home/oracle/ords_cdb_install_core_PDB_SEED_2022-06-27_031724_00731.log -2022-06-27T03:17:24.863Z INFO ... Verified database prerequisites -2022-06-27T03:17:25.123Z INFO ... Created Oracle REST Data Services proxy user -2022-06-27T03:17:25.568Z INFO ... Created Oracle REST Data Services schema -2022-06-27T03:17:26.252Z INFO ... Granted privileges to Oracle REST Data Services -2022-06-27T03:17:34.493Z INFO ... Created Oracle REST Data Services database objects -2022-06-27T03:17:43.730Z INFO ... Log file written to /home/oracle/ords_cdb_install_datamodel_PDB_SEED_2022-06-27_031743_00730.log -2022-06-27T03:17:45.883Z INFO ... Log file written to /home/oracle/ords_cdb_install_scheduler_PDB_SEED_2022-06-27_031745_00883.log -2022-06-27T03:17:49.296Z INFO ... Log file written to /home/oracle/ords_cdb_install_apex_PDB_SEED_2022-06-27_031749_00296.log -2022-06-27T03:17:51.492Z INFO Completed installation for Oracle REST Data Services version 21.4.3.r1170405. Elapsed time: 00:00:26.768 - -2022-06-27T03:17:51.492Z INFO Completed CDB installation for Oracle REST Data Services version 21.4.3.r1170405. 
Total elapsed time: 00:00:28.297 - -spawn java -jar /opt/oracle/ords/ords.war user sql_admin SQL Administrator -Enter a password for user sql_admin: -Confirm password for user sql_admin: -2022-06-27T03:17:53.192Z INFO Created user: sql_admin in file: /opt/oracle/ords/config/ords/credentials -2022-06-27T03:17:54.816Z INFO Modified: /opt/oracle/ords/config/ords/conf/apex_pu.xml, updated properties: database.api.admin.enabled, db.cdb.adminUser, db.cdb.adminUser.password -2022-06-27T03:17:56.583Z INFO HTTP and HTTP/2 cleartext listening on host: localhost port: 8888 -2022-06-27T03:17:56.647Z INFO The document root is serving static resources located in: /opt/oracle/ords/doc_root -2022-06-27T03:17:58.593Z INFO Configuration properties for: |apex|pu| -db.servicename=goldcdb_phx1pw.lbsub52b3b1cae.okecluster.oraclevcn.com -db.hostname=goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com -database.api.admin.enabled=true -db.password=****** -db.cdb.adminUser.password=****** -database.api.enabled=true -db.cdb.adminUser=C##DBAPI_CDB_ADMIN as SYSDBA -db.username=ORDS_PUBLIC_USER -restEnabledSql.active=true -resource.templates.enabled=true -db.port=1521 -feature.sdw=true -db.connectionType=basic - -2022-06-27T03:17:58.595Z WARNING *** jdbc.MaxLimit in configuration |apex|pu| is using a value of 10, this setting may not be sized adequately for a production environment *** -2022-06-27T03:17:58.595Z WARNING *** jdbc.InitialLimit in configuration |apex|pu| is using a value of 3, this setting may not be sized adequately for a production environment *** -2022-06-27T03:18:02.803Z INFO Oracle REST Data Services initialized -Oracle REST Data Services version : 21.4.3.r1170405 -Oracle REST Data Services server info: jetty/9.4.44.v20210927 - - - --- Check the CDB CRD Resource and it should be in "Ready" status for a successful deployment: - -% kubectl get cdbs -A -NAMESPACE NAME CDB NAME DB SERVER DB PORT SCAN NAME REPLICAS STATUS MESSAGE -oracle-database-operator-system cdb-dev goldcdb 
goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com 1521 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com 1 Ready diff --git a/docs/multitenant/provisioning/cdb.yaml b/docs/multitenant/provisioning/cdb.yaml deleted file mode 100644 index 8e25a763..00000000 --- a/docs/multitenant/provisioning/cdb.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: CDB -metadata: - name: cdb-dev - namespace: oracle-database-operator-system -spec: - cdbName: "goldcdb" - dbServer: "goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com" - ordsImage: phx.ocir.io//oracle/ords:21.4.3 - ordsImagePullSecret: "container-registry-secret" - dbPort: 1521 - replicas: 1 - serviceName: "goldcdb_phx1pw.lbsub52b3b1cae.okecluster.oraclevcn.com" - sysAdminPwd: - secret: - secretName: "cdb1-secret" - key: "sysadmin_pwd" - ordsPwd: - secret: - secretName: "cdb1-secret" - key: "ords_pwd" - cdbAdminUser: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_user" - cdbAdminPwd: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_pwd" - webServerUser: - secret: - secretName: "cdb1-secret" - key: "webserver_user" - webServerPwd: - secret: - secretName: "cdb1-secret" - key: "webserver_pwd" diff --git a/docs/multitenant/provisioning/cdb_crd_resource.md b/docs/multitenant/provisioning/cdb_crd_resource.md deleted file mode 100644 index f1f36404..00000000 --- a/docs/multitenant/provisioning/cdb_crd_resource.md +++ /dev/null @@ -1,38 +0,0 @@ -# Create a CDB CRD Resource using Oracle DB Operator On-Prem Controller - -In this use case, using the Oracle Database Operator On-Prem Controller, you will create the CDB kind as a custom resource that will model a CDB as a native Kubernetes object. 
- -**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. - -This example uses `create_cdb.yaml` with: - -- CDB CRD resource Name as `cdb-dev` -- Container Database (CDB) Name as `goldcdb` -- Scan Name as `goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com` -- Database Server Name as `goldhost1.lbsub52b3b1cae.okecluster.oraclevcn.com` -- ORDS Docker Image as `phx.ocir.io//oracle/ords:21.4.3` -- Image Pull Secret as `container-registry-secret` -- Database Listener Port as `1521` -- Database Service Name as `goldcdb_phx1pw.lbsub52b3b1cae.okecluster.oraclevcn.com` -- Number of replicas for CDB CRD Resource as 1 - -**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [config/crd/bases/database.oracle.com_cdbs.yaml](../../../config/crd/bases/database.oracle.com_cdbs.yaml) - -Use the file: [cdb.yaml](./cdb.yaml) for this use case as below: - -1. Deploy the .yaml file: -```sh -[root@test-server oracle-database-operator]# kubectl apply -f cdb.yaml -``` - -2. Monitor the Oracle DB Operator Pod for the progress of the CDB CRD Resource creation. - -NOTE: Check the DB Operator Pod name in your environment. - -``` -[root@test-server oracle-database-operator]# kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -``` - -## Sample Output - -[Here](./cdb.log) is the sample output for a PDB created using Oracle DB Operator On-Prem Controller using file [cdb.yaml](./cdb.yaml) diff --git a/docs/multitenant/provisioning/cdb_secret.yaml b/docs/multitenant/provisioning/cdb_secret.yaml deleted file mode 100644 index 4d03499e..00000000 --- a/docs/multitenant/provisioning/cdb_secret.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. 
-# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: v1 -kind: Secret -metadata: - name: cdb1-secret - namespace: oracle-database-operator-system -type: Opaque -data: - ords_pwd: "[ base64 encode values ]" - sysadmin_pwd: "[ base64 encode values ]" - cdbadmin_user: "[ base64 encode values ]" - cdbadmin_pwd: "[ base64 encode values ]" - webserver_user: "[ base64 encode values ]" - webserver_pwd: "[base64 encode values ]" diff --git a/docs/multitenant/provisioning/clone_pdb.log b/docs/multitenant/provisioning/clone_pdb.log deleted file mode 100644 index 54b54ef6..00000000 --- a/docs/multitenant/provisioning/clone_pdb.log +++ /dev/null @@ -1,137 +0,0 @@ --- Check the Oracle DB Operator Pod and CDB CRD Pod status: - -% kubectl get all -n oracle-database-operator-system -NAME READY STATUS RESTARTS AGE -pod/cdb-dev-ords-rs-5bztb 1/1 Running 0 5m23s -pod/cdb-dev-ords-rs-q2b68 1/1 Running 0 12m -pod/oracle-database-operator-controller-manager-76cb674c5c-4nrh8 1/1 Running 0 4d10h -pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd 1/1 Running 1 4d10h -pod/oracle-database-operator-controller-manager-76cb674c5c-xsv9g 1/1 Running 2 4d10h - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/cdb-dev-ords ClusterIP None 11m -service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.98.47 8443/TCP 5d2h -service/oracle-database-operator-webhook-service ClusterIP 10.96.166.163 443/TCP 5d2h - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 5d2h - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cdb-dev-ords-rs 2 2 2 12m -replicaset.apps/oracle-database-operator-controller-manager-76cb674c5c 3 3 3 5d2h - - --- Check the current PDB CRD resource: - -% kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 
goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew READ WRITE 1G Ready Success - - --- Check the current PDBs in the target CDB: - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW READ WRITE NO - - - --- .yaml file used in this use case to clone a PDB: - -% cat clone_pdb.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1-clone - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnewclone" - srcPdbName: "pdbnew" - fileNameConversions: "NONE" - totalSize: "UNLIMITED" - tempSize: "UNLIMITED" - action: "Clone" - - --- Apply the .yaml file: - -% kubectl apply -f clone_pdb.yaml -pdb.database.oracle.com/pdb1-clone created - - - -- Monitor the logs from the Oracle DB Operator Pod: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. 
-2022-06-27T03:37:21Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "UID": "7fbbd983-0309-4603-9c3e-77f7ffded000", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T03:37:21Z INFO pdb-webhook Setting default values in PDB spec for : pdb1-clone -2022-06-27T03:37:21Z INFO pdb-webhook - reuseTempFile : true -2022-06-27T03:37:21Z INFO pdb-webhook - unlimitedStorage : true -2022-06-27T03:37:21Z INFO pdb-webhook - tdeImport : false -2022-06-27T03:37:21Z INFO pdb-webhook - tdeExport : false -2022-06-27T03:37:21Z INFO pdb-webhook - asClone : false -2022-06-27T03:37:21Z INFO pdb-webhook - getScript : false -2022-06-27T03:37:21Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "7fbbd983-0309-4603-9c3e-77f7ffded000", "allowed": true} -2022-06-27T03:37:21Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "UID": "12aa43ea-df81-4931-b29b-d665c121590f", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T03:37:21Z INFO pdb-webhook ValidateCreate-Validating PDB spec for : pdb1-clone -2022-06-27T03:37:21Z INFO pdb-webhook validateCommon {"name": "pdb1-clone"} -2022-06-27T03:37:21Z INFO pdb-webhook Valdiating PDB Resource Action : CLONE -2022-06-27T03:37:21Z INFO pdb-webhook PDB Resource : pdb1-clone successfully validated for Action : CLONE -2022-06-27T03:37:21Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "12aa43ea-df81-4931-b29b-d665c121590f", "allowed": true} -2022-06-27T03:37:21Z INFO controllers.PDB Reconcile requested {"onpremdboperator": 
"oracle-database-operator-system/pdb1-clone"} -2022-06-27T03:37:21Z INFO controllers.PDB Adding finalizer {"managePDBDeletion": "oracle-database-operator-system/pdb1-clone"} -2022-06-27T03:37:21Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "UID": "b3bbaa0d-6865-4dd1-9ca9-d54f916a8d66", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T03:37:21Z INFO pdb-webhook Setting default values in PDB spec for : pdb1-clone -2022-06-27T03:37:21Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "b3bbaa0d-6865-4dd1-9ca9-d54f916a8d66", "allowed": true} -2022-06-27T03:37:21Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "UID": "5924d376-7a5c-4d4a-8ca9-040c585fb4b6", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T03:37:21Z INFO pdb-webhook ValidateUpdate-Validating PDB spec for : pdb1-clone -2022-06-27T03:37:21Z INFO pdb-webhook validateCommon {"name": "pdb1-clone"} -2022-06-27T03:37:21Z INFO pdb-webhook Valdiating PDB Resource Action : CLONE -2022-06-27T03:37:21Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "5924d376-7a5c-4d4a-8ca9-040c585fb4b6", "allowed": true} -2022-06-27T03:37:21Z INFO controllers.PDB Found PDB: pdb1-clone {"checkDuplicatePDB": "oracle-database-operator-system/pdb1-clone"} -2022-06-27T03:37:21Z INFO controllers.PDB Validating PDB phase for: pdb1-clone {"validatePhase": "oracle-database-operator-system/pdb1-clone", "Action": "CLONE"} -2022-06-27T03:37:21Z INFO controllers.PDB Validation complete {"validatePhase": 
"oracle-database-operator-system/pdb1-clone"} -2022-06-27T03:37:21Z INFO controllers.PDB PDB: {"onpremdboperator": "oracle-database-operator-system/pdb1-clone", "Name": "pdb1-clone", "Phase": "Cloning", "Status": "false"} -2022-06-27T03:37:21Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:37:21Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1-clone", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/", "Action": "POST"} -2022-06-27T03:37:21Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:38:01Z INFO controllers.PDB Cloned PDB successfully {"clonePDB": "oracle-database-operator-system/pdb1-clone", "Source PDB Name": "pdbnew", "Clone PDB Name": "pdbnewclone"} -2022-06-27T03:38:01Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:38:01Z DEBUG events Normal {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1-clone","uid":"16276c26-60a3-463b-bdd5-10bd328f9d43","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101449482"}, "reason": "Created", "message": "PDB 'pdbnewclone' cloned successfully"} -2022-06-27T03:38:01Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1-clone", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnewclone/status", "Action": "GET"} -2022-06-27T03:38:01Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:38:01Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1-clone", "PDB Name": 
"pdbnewclone", "State": "READ WRITE"} -2022-06-27T03:38:01Z INFO controllers.PDB Reconcile completed {"onpremdboperator": "oracle-database-operator-system/pdb1-clone"} - - - --- Check the status of the new PDB CRD resource created by cloning: - -% kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew READ WRITE 1G Ready Success -oracle-database-operator-system pdb1-clone goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnewclone goldcdb pdbnewclone READ WRITE Ready Success - - --- Verify the new PDB created from the target CDB: - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW READ WRITE NO - 4 PDBNEWCLONE READ WRITE NO diff --git a/docs/multitenant/provisioning/clone_pdb.md b/docs/multitenant/provisioning/clone_pdb.md deleted file mode 100644 index b731d301..00000000 --- a/docs/multitenant/provisioning/clone_pdb.md +++ /dev/null @@ -1,38 +0,0 @@ -# Clone a PDB using Oracle DB Operator On-Prem Controller in a target CDB - -In this use case, a PDB is cloned using Oracle DB Operator On-Prem controller. - -To clone a PDB CRD Resource, a sample .yaml file is available here: [config/samples/onpremdb/pdb_clone.yaml](../../../config/samples/onpremdb/pdb_clone.yaml) - -**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. 
- -This example uses `clone_pdb.yaml` to clone a PDB using Oracle DB Operator On-Prem Controller with: - -- PDB CRD resource Name as `pdb1-clone` -- Pluggable Database (PDB) Name as `pdbnewclone` -- Total Size of the PDB as `UNLIMITED` -- Total size for temporary tablespace as `UNLIMITED` -- Target CDB CRD Resource Name as `cdb-dev` -- Target CDB name as `goldcdb` -- Source PDB Name as `pdbnew` - -**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) - -Use the file: [clone_pdb.yaml](./clone_pdb.yaml) for this use case as below: - -1. Deploy the .yaml file: -```sh -[root@test-server oracle-database-operator]# kubectl apply -f clone_pdb.yaml -``` - -2. Monitor the Oracle DB Operator Pod for the progress of the PDB creation. - -NOTE: Check the DB Operator Pod name in your environment. - -``` -[root@test-server oracle-database-operator]# kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -``` - -## Sample Output - -[Here](./clone_pdb.log) is the sample output for a PDB created using Oracle DB Operator On-Prem Controller using file [clone_pdb.yaml](./clone_pdb.yaml) diff --git a/docs/multitenant/provisioning/clone_pdb.yaml b/docs/multitenant/provisioning/clone_pdb.yaml deleted file mode 100644 index 7d3cfff9..00000000 --- a/docs/multitenant/provisioning/clone_pdb.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1-clone - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnewclone" - srcPdbName: "pdbnew" - fileNameConversions: "NONE" - totalSize: "UNLIMITED" - tempSize: "UNLIMITED" - action: "Clone" diff --git a/docs/multitenant/provisioning/create_pdb.log b/docs/multitenant/provisioning/create_pdb.log deleted file mode 100644 index ebba9e66..00000000 --- a/docs/multitenant/provisioning/create_pdb.log +++ /dev/null @@ -1,139 +0,0 @@ --- Check the Oracle DB Operator Pod and CDB CRD Pod status: - -% kubectl get all -n oracle-database-operator-system -NAME READY STATUS RESTARTS AGE -pod/cdb-dev-ords-rs-5bztb 1/1 Running 0 5m23s -pod/cdb-dev-ords-rs-q2b68 1/1 Running 0 12m -pod/oracle-database-operator-controller-manager-76cb674c5c-4nrh8 1/1 Running 0 4d10h -pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd 1/1 Running 1 4d10h -pod/oracle-database-operator-controller-manager-76cb674c5c-xsv9g 1/1 Running 2 4d10h - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/cdb-dev-ords ClusterIP None 11m -service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.98.47 8443/TCP 5d2h -service/oracle-database-operator-webhook-service ClusterIP 10.96.166.163 443/TCP 5d2h - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 5d2h - -NAME DESIRED CURRENT READY AGE -replicaset.apps/cdb-dev-ords-rs 2 2 2 12m -replicaset.apps/oracle-database-operator-controller-manager-76cb674c5c 3 3 3 5d2h - - - --- PDB secrets in this use case were created using the below file: - -% cat pdb_secret.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: v1 -kind: Secret -metadata: - name: pdb1-secret - namespace: oracle-database-operator-system -type: Opaque -data: - sysadmin_user: "cGRiYWRtaW4=" - sysadmin_pwd: "V0VsY29tZV8yMSMj" - - --- This is the .yaml file used to create a PDB CRD resource in this use case: - -% cat pdb_create.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - adminName: - secret: - secretName: "pdb1-secret" - key: "sysadmin_user" - adminPwd: - secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" - fileNameConversions: "NONE" - totalSize: "1G" - tempSize: "100M" - action: "Create" - - - - --- Apply the .yaml files: - -% kubectl apply -f pdb_secret.yaml -secret/pdb1-secret created - -% kubectl apply -f pdb_create.yaml -pdb.database.oracle.com/pdb1 created - - --- Monitor the logs from the Oracle DB Operator Pod: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. 
-2022-06-27T03:28:30Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "UID": "6fca0e37-8fd9-4ccd-86ad-2edec604a28b", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T03:28:30Z INFO pdb-webhook Setting default values in PDB spec for : pdb1 -2022-06-27T03:28:30Z INFO pdb-webhook - reuseTempFile : true -2022-06-27T03:28:30Z INFO pdb-webhook - unlimitedStorage : true -2022-06-27T03:28:30Z INFO pdb-webhook - tdeImport : false -2022-06-27T03:28:30Z INFO pdb-webhook - tdeExport : false -2022-06-27T03:28:30Z INFO pdb-webhook - asClone : false -2022-06-27T03:28:30Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "6fca0e37-8fd9-4ccd-86ad-2edec604a28b", "allowed": true} -2022-06-27T03:28:30Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "UID": "6ae20043-fa1a-4eba-b943-a2266183da48", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T03:28:30Z INFO pdb-webhook ValidateUpdate-Validating PDB spec for : pdb1 -2022-06-27T03:28:30Z INFO pdb-webhook validateCommon {"name": "pdb1"} -2022-06-27T03:28:30Z INFO pdb-webhook Valdiating PDB Resource Action : CREATE -2022-06-27T03:28:30Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "6ae20043-fa1a-4eba-b943-a2266183da48", "allowed": true} -2022-06-27T03:28:30Z INFO controllers.PDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/pdb1"} -2022-06-27T03:28:30Z INFO controllers.PDB Validating PDB phase for: pdb1 {"validatePhase": "oracle-database-operator-system/pdb1", "Action": "CREATE"} 
-2022-06-27T03:28:30Z INFO controllers.PDB Validation complete {"validatePhase": "oracle-database-operator-system/pdb1"} -2022-06-27T03:28:30Z INFO controllers.PDB PDB: {"onpremdboperator": "oracle-database-operator-system/pdb1", "Name": "pdb1", "Phase": "Creating", "Status": "false"} -2022-06-27T03:28:30Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:28:30Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/", "Action": "POST"} -2022-06-27T03:28:30Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:29:04Z INFO controllers.PDB Created PDB Resource {"createPDB": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew"} -2022-06-27T03:29:04Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:29:04Z DEBUG events Normal {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1","uid":"81f2e686-6e1b-4e2c-8a2f-e20c2f99d6b9","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101446779"}, "reason": "Created", "message": "PDB 'pdbnew' created successfully"} -2022-06-27T03:29:04Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "GET"} -2022-06-27T03:29:04Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:29:04Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew", "State": "READ WRITE"} -2022-06-27T03:29:04Z 
INFO controllers.PDB Reconcile completed {"onpremdboperator": "oracle-database-operator-system/pdb1"} - - - - --- Check the status of the PDB CRD Resource created: - -% kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew READ WRITE 1G Ready Success - - --- The status as "Ready" and message as "Success" confirms that the resource has been created successfully. - - --- Verify that the PDB is created from CDB: - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW READ WRITE NO diff --git a/docs/multitenant/provisioning/create_pdb.md b/docs/multitenant/provisioning/create_pdb.md deleted file mode 100644 index 4f6e57aa..00000000 --- a/docs/multitenant/provisioning/create_pdb.md +++ /dev/null @@ -1,37 +0,0 @@ -# Create a PDB using Oracle DB Operator On-Prem Controller in a target CDB - -The Oracle Database Operator On-Prem Controller creates the PDB kind as a custom resource that models a PDB as a native Kubernetes object. There is a one-to-one mapping between the actual PDB and the Kubernetes PDB Custom Resource. Each PDB resource follows the PDB CRD as defined here: [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) - -To create a PDB CRD Resource, a sample .yaml file is available here: [config/samples/onpremdb/pdb_create.yaml](../../../config/samples/onpremdb/pdb_create.yaml) - -**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. 
- -This example uses `create_pdb.yaml` to create a PDB using Oracle DB Operator On-Prem Controller with: - -- PDB CRD resource Name as `pdb1` -- Pluggable Database (PDB) Name as `pdbnew` -- Total Size of the PDB as `1GB` -- Total size for temporary tablespace as `100M` -- Target CDB CRD Resource Name as `cdb-dev` -- Target CDB name as `goldcdb` - -**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) - -Use the file: [create_pdb.yaml](./create_pdb.yaml) for this use case as below: - -1. Deploy the .yaml file: -```sh -[root@test-server oracle-database-operator]# kubectl apply -f create_pdb.yaml -``` - -2. Monitor the Oracle DB Operator Pod for the progress of the PDB creation. - -NOTE: Check the DB Operator Pod name in your environment. - -``` -[root@test-server oracle-database-operator]# kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -``` - -## Sample Output - -[Here](./create_pdb.log) is the sample output for a PDB created using Oracle DB Operator On-Prem Controller using file [create_pdb.yaml](./create_pdb.yaml) diff --git a/docs/multitenant/provisioning/create_pdb.yaml b/docs/multitenant/provisioning/create_pdb.yaml deleted file mode 100644 index 82941185..00000000 --- a/docs/multitenant/provisioning/create_pdb.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - adminName: - secret: - secretName: "pdb1-secret" - key: "sysadmin_user" - adminPwd: - secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" - fileNameConversions: "NONE" - totalSize: "1G" - tempSize: "100M" - action: "Create" diff --git a/docs/multitenant/provisioning/delete_pdb.log b/docs/multitenant/provisioning/delete_pdb.log deleted file mode 100644 index 7f361871..00000000 --- a/docs/multitenant/provisioning/delete_pdb.log +++ /dev/null @@ -1,157 +0,0 @@ --- Check the existing PDB CRD resources: - -% kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew READ WRITE 1G Ready Success -oracle-database-operator-system pdb1-clone goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnewclone goldcdb pdbnewclone READ WRITE Ready Success - - --- Also check from the database as well: - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW READ WRITE NO - 4 PDBNEWCLONE READ WRITE NO - - - -% cat modify_pdb_close.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1-clone - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnewclone" - pdbState: "CLOSE" - modifyOption: "IMMEDIATE" - action: "Modify" - - -% kubectl apply -f modify_pdb_close.yaml -pdb.database.oracle.com/pdb1-clone configured - - - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. -2022-06-27T04:19:36Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "UID": "24842cc8-0047-46cc-86a5-2782a95e3e36", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T04:19:36Z INFO pdb-webhook Setting default values in PDB spec for : pdb1-clone -2022-06-27T04:19:36Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "24842cc8-0047-46cc-86a5-2782a95e3e36", "allowed": true} -2022-06-27T04:19:36Z INFO controllers.PDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/pdb1-clone"} -2022-06-27T04:19:36Z INFO controllers.PDB Validating PDB phase for: pdb1-clone {"validatePhase": "oracle-database-operator-system/pdb1-clone", "Action": "MODIFY"} -2022-06-27T04:19:36Z INFO controllers.PDB Validation complete {"validatePhase": "oracle-database-operator-system/pdb1-clone"} -2022-06-27T04:19:36Z INFO controllers.PDB PDB: {"onpremdboperator": "oracle-database-operator-system/pdb1-clone", "Name": "pdb1-clone", "Phase": "Modifying", "Status": "false"} -2022-06-27T04:19:36Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:19:36Z INFO controllers.PDB Issuing REST call {"callAPI": 
"oracle-database-operator-system/pdb1-clone", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnewclone/status", "Action": "GET"} -2022-06-27T04:19:36Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:19:38Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1-clone", "PDB Name": "pdbnewclone", "State": "READ WRITE"} -2022-06-27T04:19:38Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:19:38Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1-clone", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnewclone/status", "Action": "POST"} -2022-06-27T04:19:38Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:19:39Z INFO controllers.PDB Successfully modified PDB state {"modifyPDB": "oracle-database-operator-system/pdb1-clone", "PDB Name": "pdbnewclone"} -2022-06-27T04:19:39Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:19:39Z DEBUG events Normal {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1-clone","uid":"309dd711-198b-45b6-a34b-da5069af70fb","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101462484"}, "reason": "Modified", "message": "PDB 'pdbnewclone' modified successfully"} -2022-06-27T04:19:39Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1-clone", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnewclone/status", "Action": "GET"} -2022-06-27T04:19:39Z INFO controllers.PDB Found CR 
for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:19:39Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1-clone", "PDB Name": "pdbnewclone", "State": "MOUNTED"} -2022-06-27T04:19:39Z INFO controllers.PDB Reconcile completed {"onpremdboperator": "oracle-database-operator-system/pdb1-clone"} - - - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW READ WRITE NO - 4 PDBNEWCLONE MOUNTED - - --- Check the .yaml file to be used to delete a PDB: - -% cat delete_pdb.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1-clone - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - pdbName: "pdbnewclone" - action: "Delete" - dropAction: "INCLUDING" - - - --- Apply the .yaml file: - -% kubectl apply -f delete_pdb.yaml -pdb.database.oracle.com/pdb1-clone configured - - --- Monitor the Oracle DB Operator Pod logs: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. 
-2022-06-27T04:21:37Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "UID": "64148dda-d0df-4e03-88e3-98b1ce7b7aaf", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T04:21:37Z INFO pdb-webhook ValidateUpdate-Validating PDB spec for : pdb1-clone -2022-06-27T04:21:37Z INFO pdb-webhook validateCommon {"name": "pdb1-clone"} -2022-06-27T04:21:37Z INFO pdb-webhook Valdiating PDB Resource Action : DELETE -2022-06-27T04:21:37Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "64148dda-d0df-4e03-88e3-98b1ce7b7aaf", "allowed": true} -2022-06-27T04:21:37Z INFO controllers.PDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/pdb1-clone"} -2022-06-27T04:21:37Z INFO controllers.PDB Validating PDB phase for: pdb1-clone {"validatePhase": "oracle-database-operator-system/pdb1-clone", "Action": "DELETE"} -2022-06-27T04:21:37Z INFO controllers.PDB Validation complete {"validatePhase": "oracle-database-operator-system/pdb1-clone"} -2022-06-27T04:21:37Z INFO controllers.PDB PDB: {"onpremdboperator": "oracle-database-operator-system/pdb1-clone", "Name": "pdb1-clone", "Phase": "Deleting", "Status": "false"} -2022-06-27T04:21:37Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:21:37Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1-clone", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnewclone/", "Action": "DELETE"} -2022-06-27T04:21:37Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1-clone", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:21:39Z INFO controllers.PDB 
Successfully dropped PDB {"deletePDBInstance": "oracle-database-operator-system/pdb1-clone", "PDB Name": "pdbnewclone"} -2022-06-27T04:21:39Z INFO controllers.PDB Removing finalizer {"deletePDB": "oracle-database-operator-system/pdb1-clone"} -2022-06-27T04:21:39Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "UID": "15bd320b-3f9f-46a7-8493-c586310b7d84", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T04:21:39Z INFO pdb-webhook ValidateUpdate-Validating PDB spec for : pdb1-clone -2022-06-27T04:21:39Z INFO pdb-webhook validateCommon {"name": "pdb1-clone"} -2022-06-27T04:21:39Z INFO pdb-webhook Valdiating PDB Resource Action : DELETE -2022-06-27T04:21:39Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "15bd320b-3f9f-46a7-8493-c586310b7d84", "allowed": true} -2022-06-27T04:21:39Z INFO controllers.PDB Successfully deleted PDB resource {"deletePDB": "oracle-database-operator-system/pdb1-clone"} -2022-06-27T04:21:39Z INFO controllers.PDB Reconcile completed {"onpremdboperator": "oracle-database-operator-system/pdb1-clone"} -2022-06-27T04:21:39Z DEBUG events Normal {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1-clone","uid":"309dd711-198b-45b6-a34b-da5069af70fb","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101463106"}, "reason": "Deleted", "message": "PDB 'pdbnewclone' dropped successfully"} - - - - - --- Check the PDB CRD resources: - -% kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew READ WRITE 1G Ready Success - - --- Verify from the CDB: - -SQL> show pdbs - - CON_ID CON_NAME 
OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW READ WRITE NO diff --git a/docs/multitenant/provisioning/delete_pdb.md b/docs/multitenant/provisioning/delete_pdb.md deleted file mode 100644 index e36bb473..00000000 --- a/docs/multitenant/provisioning/delete_pdb.md +++ /dev/null @@ -1,37 +0,0 @@ -# Delete a PDB using Oracle DB Operator On-Prem Controller in a target CDB - -In this use case, a PDB is deleted using Oracle DB Operator On-Prem controller. - -To delete a PDB CRD Resource, a sample .yaml file is available here: [config/samples/onpremdb/pdb_delete.yaml](../../../config/samples/onpremdb/pdb_delete.yaml) - -**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. - -This example uses `delete_pdb.yaml` to delete a PDB using Oracle DB Operator On-Prem Controller with: - -- Pluggable Database (PDB) Name as `pdbnewclone` -- Target CDB CRD Resource Name as `cdb-dev` -- Action to be taken on the PDB as `Delete` -- Option to specify if datafiles should be removed as `INCLUDING` - -**NOTE:** You need to *modify* the PDB status to MOUNTED, as described earlier, on the target CDB before you want to delete that PDB. - -**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) - -Use the file: [delete_pdb.yaml](./delete_pdb.yaml) for this use case as below: - -1. Deploy the .yaml file: -```sh -% kubectl apply -f delete_pdb.yaml -``` - -2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the PDB deletion. - -NOTE: Check the DB Operator Pod name in your environment. 
- -```sh -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -``` - -## Sample Output - -[Here](./delete_pdb.log) is the sample output for a PDB created using Oracle DB Operator On-Prem Controller using file [delete_pdb.yaml](./delete_pdb.yaml) diff --git a/docs/multitenant/provisioning/delete_pdb.yaml b/docs/multitenant/provisioning/delete_pdb.yaml deleted file mode 100644 index d16084bb..00000000 --- a/docs/multitenant/provisioning/delete_pdb.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1-clone - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - pdbName: "pdbnewclone" - action: "Delete" - dropAction: "INCLUDING" diff --git a/docs/multitenant/provisioning/example_setup_using_oci_oke_cluster.md b/docs/multitenant/provisioning/example_setup_using_oci_oke_cluster.md index f3ed2489..d56efacb 100644 --- a/docs/multitenant/provisioning/example_setup_using_oci_oke_cluster.md +++ b/docs/multitenant/provisioning/example_setup_using_oci_oke_cluster.md @@ -18,7 +18,7 @@ Below are the main steps that will be involved in this setup: Check the [Oracle Documentation](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengnetworkconfigexample.htm#example-privatek8sapi-privateworkers-publiclb) for the OKE rules settings. -Create OKE cluster with CUSTOM option to use same VCN where ExaCS is provisioned. +Create OKE cluster with CUSTOM option to use same VCN where ExaCS is provisioned. **NOTE:** Make sure you choose same VCN exaphxvcn where ExaCS is provisioned. 
@@ -35,3 +35,4 @@ NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL- ``` Once this setup is ready, you can proceed with the installation of [Oracle Database Operator for Kubernetes](https://github.com/oracle/oracle-database-operator/blob/main/README.md) to use the Oracle On-prem controller to manage PDBs in this CDB. + diff --git a/docs/multitenant/provisioning/known_issues.md b/docs/multitenant/provisioning/known_issues.md deleted file mode 100644 index 3ab19897..00000000 --- a/docs/multitenant/provisioning/known_issues.md +++ /dev/null @@ -1,49 +0,0 @@ -# Known Issues - -Please refer to the below list of known issues related to Oracle DB Operator On-Prem Controller: - -1. **ORA-20002: ERROR: The user ORDS_PUBLIC_USER already exists in the logs of CDB CRD Pods.** - -This error is expected when you are deploying `2` replicas of CDB CRD during the deployment of the CDB CRD. Below is the snippet of a possible error: -``` -2022-06-22T20:06:32.616Z INFO Installing Oracle REST Data Services version 21.4.2.r0621806 in CDB$ROOT -2022-06-22T20:06:32.663Z INFO ... Log file written to /home/oracle/ords_cdb_install_core_CDB_ROOT_2022-06-22_200632_00662.log -2022-06-22T20:06:33.835Z INFO CDB restart file created in /home/oracle/ords_restart_2022-06-22_200633_00835.properties -2022-06-22T20:06:33.837Z SEVERE Error executing script: ords_prereq_env.sql Error: ORA-20002: ERROR: The user ORDS_PUBLIC_USER already exists. You must first uninstall ORDS using ords_uninstall.sql prior to running the install scripts. -ORA-06512: at line 8 -ORA-06512: at line 8 - - Refer to log file /home/oracle/ords_cdb_install_core_CDB_ROOT_2022-06-22_200632_00662.log for details - -java.io.IOException: Error executing script: ords_prereq_env.sql Error: ORA-20002: ERROR: The user ORDS_PUBLIC_USER already exists. You must first uninstall ORDS using ords_uninstall.sql prior to running the install scripts. 
-ORA-06512: at line 8 -ORA-06512: at line 8 -``` -This error is seen in the logs of one of the two CDB CRD pods. The other Pod `does not` show this error and the ORDS installation is done successfully. - -To avoid this error, you need to initially deploy the CDB CRD with a single replica and later add another replica as per need. - -2. **PDB create failure with error "Failed: Unauthorized"** - -It was observed that PDB creation fails with the below error when special characters like "_" or "#" were used in the password for user SQL_ADMIN: -``` -2022-06-22T20:10:09Z INFO controllers.PDB ORDS Error - HTTP Status Code :401 {"callAPI": "oracle-database-operator-system/pdb1", "Err": "\n{\n \"code\": \"Unauthorized\",\n \"message\": \"Unauthorized\",\n \"type\": \"tag:oracle.com,2020:error/Unauthorized\",\n \"instance\": \"tag:oracle.com,2020:ecid/OoqA0Zw3oBWdabzP8wUMcQ\"\n}"} -2022-06-22T20:10:09Z INFO controllers.PDB Reconcile completed {"onpremdboperator": "oracle-database-operator-system/pdb1"} -2022-06-22T20:10:09Z DEBUG events Warning {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1","uid":"19fc98b1-ca7f-4e63-a6c7-fdeb14b8c275","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"99558229"}, "reason": "ORDSError", "message": "Failed: Unauthorized"} -``` - -In testing, we have used the password `welcome1` for the user SQL_ADMIN. - -To avoid this error, please avoid password `welcome1` for SQL_ADMIN user. - - -3. 
**After cloning a PDB from another PDB, PDB SIZE field is show as empty even if the .yaml file used during the PDB cloning specifies the PDB size:** - -```sh -% kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew READ WRITE 1G Ready Success -oracle-database-operator-system pdb1-clone goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnewclone goldcdb pdbnewclone READ WRITE Ready Success -``` - -In the above example the PDB `pdbnewclone` is cloned from PDB `pdbnew` and is showing the size column as EMPTY. This will be fixed in future version. diff --git a/docs/multitenant/provisioning/modify_pdb.log b/docs/multitenant/provisioning/modify_pdb.log deleted file mode 100644 index 151393a7..00000000 --- a/docs/multitenant/provisioning/modify_pdb.log +++ /dev/null @@ -1,181 +0,0 @@ ------ Closing a PDB ------ - --- Check the existing PDB CRD resources - -jyotiprakashverma@jyotiprakashverma-mac onprem_test % kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew READ WRITE 1G Ready Success -oracle-database-operator-system pdb1-clone goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnewclone goldcdb pdbnewclone READ WRITE Ready Success - - - - --- Check the status of the PDBs in the CDB: - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW READ WRITE NO - 5 PDBNEWCLONE READ WRITE NO - - - --- Check the file to modify the PDB state to CLOSE: - -% cat modify_pdb_close.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. 
-# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - pdbState: "CLOSE" - modifyOption: "IMMEDIATE" - action: "Modify" - - --- Apply the file: - -% kubectl apply -f modify_pdb_close.yaml -pdb.database.oracle.com/pdb1 configured - - --- Monitor the logs from the Oracle DB Operator Pod: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. -2022-06-27T03:44:36Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "UID": "08f95926-6bf1-4c70-b319-1b17015ce22a", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T03:44:36Z INFO pdb-webhook Setting default values in PDB spec for : pdb1 -2022-06-27T03:44:36Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "08f95926-6bf1-4c70-b319-1b17015ce22a", "allowed": true} -2022-06-27T03:44:36Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "UID": "64ebebe2-87f2-4237-8928-532365f3cca9", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T03:44:36Z INFO pdb-webhook ValidateUpdate-Validating PDB spec for : pdb1 -2022-06-27T03:44:36Z INFO pdb-webhook validateCommon {"name": "pdb1"} -2022-06-27T03:44:36Z INFO pdb-webhook Valdiating PDB Resource Action : MODIFY -2022-06-27T03:44:36Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": 
"/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "64ebebe2-87f2-4237-8928-532365f3cca9", "allowed": true} -2022-06-27T03:44:36Z INFO controllers.PDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/pdb1"} -2022-06-27T03:44:36Z INFO controllers.PDB Validating PDB phase for: pdb1 {"validatePhase": "oracle-database-operator-system/pdb1", "Action": "MODIFY"} -2022-06-27T03:44:36Z INFO controllers.PDB Validation complete {"validatePhase": "oracle-database-operator-system/pdb1"} -2022-06-27T03:44:36Z INFO controllers.PDB PDB: {"onpremdboperator": "oracle-database-operator-system/pdb1", "Name": "pdb1", "Phase": "Modifying", "Status": "false"} -2022-06-27T03:44:36Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:44:36Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "GET"} -2022-06-27T03:44:36Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:44:36Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew", "State": "READ WRITE"} -2022-06-27T03:44:36Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:44:36Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "POST"} -2022-06-27T03:44:36Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:44:38Z INFO controllers.PDB 
Successfully modified PDB state {"modifyPDB": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew"} -2022-06-27T03:44:38Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:44:38Z DEBUG events Normal {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1","uid":"81f2e686-6e1b-4e2c-8a2f-e20c2f99d6b9","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101451707"}, "reason": "Modified", "message": "PDB 'pdbnew' modified successfully"} -2022-06-27T03:44:38Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "GET"} -2022-06-27T03:44:38Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:44:38Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew", "State": "MOUNTED"} -2022-06-27T03:44:38Z INFO controllers.PDB Reconcile completed {"onpremdboperator": "oracle-database-operator-system/pdb1"} - - - - --- Check the status of PDB CRD resources - -jyotiprakashverma@jyotiprakashverma-mac onprem_test % kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew MOUNTED 1G Ready Success -oracle-database-operator-system pdb1-clone goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnewclone goldcdb pdbnewclone READ WRITE Ready Success - - - --- Confirm the status of the PDB in the CDB: - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW MOUNTED - 4 
PDBNEWCLONE READ WRITE NO - - - - - - - - ------ Opening a PDB ------ - --- Check the .yaml file to open the PDB: - -% cat modify_pdb_open.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - pdbState: "OPEN" - modifyOption: "READ WRITE" - action: "Modify" - - - --- Apply the file: - -% kubectl apply -f modify_pdb_open.yaml -pdb.database.oracle.com/pdb1 configured - - --- Monitor the logs from the Oracle DB Operator Pod: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. -2022-06-27T03:48:38Z INFO controllers.PDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/pdb1"} -2022-06-27T03:48:38Z INFO controllers.PDB Validating PDB phase for: pdb1 {"validatePhase": "oracle-database-operator-system/pdb1", "Action": "MODIFY"} -2022-06-27T03:48:38Z INFO controllers.PDB Validation complete {"validatePhase": "oracle-database-operator-system/pdb1"} -2022-06-27T03:48:38Z INFO controllers.PDB PDB: {"onpremdboperator": "oracle-database-operator-system/pdb1", "Name": "pdb1", "Phase": "Modifying", "Status": "false"} -2022-06-27T03:48:38Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:48:38Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "GET"} -2022-06-27T03:48:38Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": 
"cdb-dev"} -2022-06-27T03:48:38Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew", "State": "MOUNTED"} -2022-06-27T03:48:38Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:48:38Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "POST"} -2022-06-27T03:48:38Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:48:41Z INFO controllers.PDB Successfully modified PDB state {"modifyPDB": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew"} -2022-06-27T03:48:41Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:48:41Z DEBUG events Normal {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1","uid":"81f2e686-6e1b-4e2c-8a2f-e20c2f99d6b9","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101452939"}, "reason": "Modified", "message": "PDB 'pdbnew' modified successfully"} -2022-06-27T03:48:41Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "GET"} -2022-06-27T03:48:41Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T03:48:41Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew", "State": "READ WRITE"} -2022-06-27T03:48:41Z INFO controllers.PDB Reconcile completed {"onpremdboperator": 
"oracle-database-operator-system/pdb1"} - - - --- Verify the status of the PDB: - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW READ WRITE NO - 4 PDBNEWCLONE READ WRITE NO diff --git a/docs/multitenant/provisioning/modify_pdb.md b/docs/multitenant/provisioning/modify_pdb.md deleted file mode 100644 index a0432efb..00000000 --- a/docs/multitenant/provisioning/modify_pdb.md +++ /dev/null @@ -1,69 +0,0 @@ -# Modify a PDB using Oracle DB Operator On-Prem Controller in a target CDB - -In this use case, the state of an existing PDB is modified using Oracle DB Operator On-Prem controller. - -To modify a PDB CRD Resource, a sample .yaml file is available here: [config/samples/onpremdb/pdb_modify.yaml](../../../config/samples/onpremdb/pdb_modify.yaml) - -**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. - -Subcase 1: This example uses `modify_pdb_close.yaml` to close a PDB using Oracle DB Operator On-Prem Controller with: - -- PDB CRD resource Name as `pdb1` -- Pluggable Database (PDB) Name as `pdbnew` -- Target CDB CRD Resource Name as `cdb-dev` -- Target CDB name as `goldcdb` -- Action to be taken on the PDB as `MODIFY` -- Target state of the PDB as `CLOSE` -- Option to close the state (i.e. modify) as `IMMEDIATE` - - -**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) - -Use the file: [modify_pdb_close.yaml](./modify_pdb_close.yaml) for this use case as below: - -1. Deploy the .yaml file: -```sh -% kubectl apply -f modify_pdb_close.yaml -pdb.database.oracle.com/pdb1 configured -``` - -2. Monitor the Oracle DB Operator Pod for the progress of the PDB creation. 
- -NOTE: Check the DB Operator Pod name in your environment. - -``` -[root@test-server oracle-database-operator]# kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -``` - -Subcase 2: This example uses `modify_pdb_open.yaml` to open a PDB using Oracle DB Operator On-Prem Controller with: - -- PDB CRD resource Name as `pdb1` -- Pluggable Database (PDB) Name as `pdbnew` -- Target CDB CRD Resource Name as `cdb-dev` -- Target CDB name as `goldcdb` -- Action to be taken on the PDB as `MODIFY` -- Target state of the PDB as `OPEN` -- Option to close the state (i.e. modify) as `READ WRITE` - - -**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) - -Use the file: [modify_pdb_open.yaml](./modify_pdb_open.yaml) for this use case as below: - -1. Deploy the .yaml file: -```sh -% kubectl apply -f modify_pdb_open.yaml -pdb.database.oracle.com/pdb1 configured -``` - -2. Monitor the Oracle DB Operator Pod for the progress of the PDB creation. - -NOTE: Check the DB Operator Pod name in your environment. - -``` -[root@test-server oracle-database-operator]# kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -``` - -## Sample Output - -[Here](./modify_pdb.log) is the sample output for a PDB created using Oracle DB Operator On-Prem Controller using file [modify_pdb_close.yaml](./modify_pdb_close.yaml) and [modify_pdb_open.yaml](./modify_pdb_open.yaml) diff --git a/docs/multitenant/provisioning/modify_pdb_close.yaml b/docs/multitenant/provisioning/modify_pdb_close.yaml deleted file mode 100644 index 8897b482..00000000 --- a/docs/multitenant/provisioning/modify_pdb_close.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. 
-# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - pdbState: "CLOSE" - modifyOption: "IMMEDIATE" - action: "Modify" diff --git a/docs/multitenant/provisioning/modify_pdb_open.yaml b/docs/multitenant/provisioning/modify_pdb_open.yaml deleted file mode 100644 index 0ec640d4..00000000 --- a/docs/multitenant/provisioning/modify_pdb_open.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - pdbState: "OPEN" - modifyOption: "READ WRITE" - action: "Modify" diff --git a/docs/multitenant/provisioning/multinamespace/cdb_create.yaml b/docs/multitenant/provisioning/multinamespace/cdb_create.yaml new file mode 100644 index 00000000..d3b5e04f --- /dev/null +++ b/docs/multitenant/provisioning/multinamespace/cdb_create.yaml @@ -0,0 +1,44 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: CDB +metadata: + name: cdb-dev + namespace: cdbnamespace +spec: + cdbName: "DB12" + ordsImage: ".............your registry............./ords-dboper:latest" + ordsImagePullPolicy: "Always" + dbTnsurl : "...Container tns alias....." 
+ replicas: 1 + sysAdminPwd: + secret: + secretName: "cdb1-secret" + key: "sysadmin_pwd" + ordsPwd: + secret: + secretName: "cdb1-secret" + key: "ords_pwd" + cdbAdminUser: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_user" + cdbAdminPwd: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_pwd" + webServerUser: + secret: + secretName: "cdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "cdb1-secret" + key: "webserver_pwd" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + diff --git a/docs/multitenant/provisioning/multinamespace/pdb_clone.yaml b/docs/multitenant/provisioning/multinamespace/pdb_clone.yaml new file mode 100644 index 00000000..b88fb71b --- /dev/null +++ b/docs/multitenant/provisioning/multinamespace/pdb_clone.yaml @@ -0,0 +1,50 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdb2_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + action: "Clone" diff --git a/docs/multitenant/provisioning/multinamespace/pdb_close.yaml b/docs/multitenant/provisioning/multinamespace/pdb_close.yaml new file mode 100644 index 00000000..a823f5d9 --- /dev/null +++ b/docs/multitenant/provisioning/multinamespace/pdb_close.yaml @@ -0,0 +1,44 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + diff --git 
a/docs/multitenant/provisioning/multinamespace/pdb_create.yaml b/docs/multitenant/provisioning/multinamespace/pdb_create.yaml new file mode 100644 index 00000000..200f3712 --- /dev/null +++ b/docs/multitenant/provisioning/multinamespace/pdb_create.yaml @@ -0,0 +1,46 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + fileNameConversions: "NONE" + tdeImport: false + totalSize: "1G" + tempSize: "100M" + action: "Create" + diff --git a/docs/multitenant/provisioning/multinamespace/pdb_delete.yaml b/docs/multitenant/provisioning/multinamespace/pdb_delete.yaml new file mode 100644 index 00000000..282885b0 --- /dev/null +++ b/docs/multitenant/provisioning/multinamespace/pdb_delete.yaml @@ -0,0 +1,34 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + diff --git 
a/docs/multitenant/provisioning/multinamespace/pdb_open.yaml b/docs/multitenant/provisioning/multinamespace/pdb_open.yaml new file mode 100644 index 00000000..85fb2ce4 --- /dev/null +++ b/docs/multitenant/provisioning/multinamespace/pdb_open.yaml @@ -0,0 +1,43 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" diff --git a/docs/multitenant/provisioning/multinamespace/pdb_plug.yaml b/docs/multitenant/provisioning/multinamespace/pdb_plug.yaml new file mode 100644 index 00000000..d9135f13 --- /dev/null +++ b/docs/multitenant/provisioning/multinamespace/pdb_plug.yaml @@ -0,0 +1,46 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + action: "Plug" + assertivePdbDeletion: true + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + + diff --git a/docs/multitenant/provisioning/multinamespace/pdb_unplug.yaml b/docs/multitenant/provisioning/multinamespace/pdb_unplug.yaml new file mode 100644 index 00000000..f3667dad --- /dev/null +++ b/docs/multitenant/provisioning/multinamespace/pdb_unplug.yaml @@ -0,0 +1,39 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + diff --git a/docs/multitenant/provisioning/ords_image.log b/docs/multitenant/provisioning/ords_image.log deleted file mode 100644 index 771a22ca..00000000 --- a/docs/multitenant/provisioning/ords_image.log +++ /dev/null @@ -1,503 +0,0 @@ -/usr/bin/docker build -t oracle/ords-dboper:22.2.1 . -Sending build context to Docker daemon 280.6kB -Step 1/10 : FROM container-registry.oracle.com/java/jdk:latest - ---> 44b86e8925c4 -Step 2/10 : ENV ORDS_HOME=/opt/oracle/ords RUN_FILE="runOrdsSSL.sh" - ---> Running in e22c5a03b869 -Removing intermediate container e22c5a03b869 - ---> 1421497abef8 -Step 3/10 : COPY $RUN_FILE $ORDS_HOME/ - ---> d96ac1477d2d -Step 4/10 : RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps && yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && yum -y install java-11-openjdk-devel && yum -y install ords && yum -y install iproute && yum clean all - ---> Running in c08b8dac80a5 -Oracle Linux 8 BaseOS Latest (x86_64) 72 MB/s | 49 MB 00:00 -Oracle Linux 8 Application Stream (x86_64) 88 MB/s | 37 MB 00:00 -Last metadata expiration check: 0:00:07 ago on Mon 12 Sep 2022 03:23:32 PM UTC. -Package yum-utils-4.0.21-11.0.1.el8.noarch is already installed. 
-Package vim-minimal-2:8.0.1763-19.0.1.el8_6.2.x86_64 is already installed. -Package procps-ng-3.3.15-6.0.1.el8.x86_64 is already installed. -Dependencies resolved. -================================================================================ - Package Arch Version Repository Size -================================================================================ -Installing: - bind-utils x86_64 32:9.11.36-3.el8 ol8_appstream 452 k - expect x86_64 5.45.4-5.el8 ol8_baseos_latest 266 k - hostname x86_64 3.20-6.el8 ol8_baseos_latest 32 k - net-tools x86_64 2.0-0.52.20160912git.el8 ol8_baseos_latest 322 k - openssl x86_64 1:1.1.1k-7.el8_6 ol8_baseos_latest 709 k - sudo x86_64 1.8.29-8.el8 ol8_baseos_latest 925 k - tar x86_64 2:1.30-5.el8 ol8_baseos_latest 838 k - tree x86_64 1.7.0-15.el8 ol8_baseos_latest 59 k - unzip x86_64 6.0-46.0.1.el8 ol8_baseos_latest 196 k - wget x86_64 1.19.5-10.0.1.el8 ol8_appstream 734 k - which x86_64 2.21-17.el8 ol8_baseos_latest 49 k - zip x86_64 3.0-23.el8 ol8_baseos_latest 270 k -Upgrading: - openssl-libs x86_64 1:1.1.1k-7.el8_6 ol8_baseos_latest 1.5 M - vim-minimal x86_64 2:8.0.1763-19.0.1.el8_6.4 ol8_baseos_latest 575 k -Installing dependencies: - bind-libs x86_64 32:9.11.36-3.el8 ol8_appstream 175 k - bind-libs-lite x86_64 32:9.11.36-3.el8 ol8_appstream 1.2 M - bind-license noarch 32:9.11.36-3.el8 ol8_appstream 103 k - fstrm x86_64 0.6.1-2.el8 ol8_appstream 29 k - libmaxminddb x86_64 1.2.0-10.el8 ol8_appstream 33 k - libmetalink x86_64 0.1.3-7.el8 ol8_baseos_latest 32 k - protobuf-c x86_64 1.3.0-6.el8 ol8_appstream 37 k - python3-bind noarch 32:9.11.36-3.el8 ol8_appstream 150 k - python3-ply noarch 3.9-9.el8 ol8_baseos_latest 111 k - tcl x86_64 1:8.6.8-2.el8 ol8_baseos_latest 1.1 M - -Transaction Summary -================================================================================ -Install 22 Packages -Upgrade 2 Packages - -Total download size: 9.7 M -Downloading Packages: -(1/24): expect-5.45.4-5.el8.x86_64.rpm 158 kB/s | 266 
kB 00:01 -(2/24): hostname-3.20-6.el8.x86_64.rpm 18 kB/s | 32 kB 00:01 -(3/24): libmetalink-0.1.3-7.el8.x86_64.rpm 18 kB/s | 32 kB 00:01 -(4/24): net-tools-2.0-0.52.20160912git.el8.x86_ 2.3 MB/s | 322 kB 00:00 -(5/24): openssl-1.1.1k-7.el8_6.x86_64.rpm 4.0 MB/s | 709 kB 00:00 -(6/24): python3-ply-3.9-9.el8.noarch.rpm 538 kB/s | 111 kB 00:00 -(7/24): sudo-1.8.29-8.el8.x86_64.rpm 5.0 MB/s | 925 kB 00:00 -(8/24): tar-1.30-5.el8.x86_64.rpm 4.2 MB/s | 838 kB 00:00 -(9/24): unzip-6.0-46.0.1.el8.x86_64.rpm 3.6 MB/s | 196 kB 00:00 -(10/24): tcl-8.6.8-2.el8.x86_64.rpm 4.1 MB/s | 1.1 MB 00:00 -(11/24): which-2.21-17.el8.x86_64.rpm 613 kB/s | 49 kB 00:00 -(12/24): tree-1.7.0-15.el8.x86_64.rpm 208 kB/s | 59 kB 00:00 -(13/24): bind-libs-9.11.36-3.el8.x86_64.rpm 1.3 MB/s | 175 kB 00:00 -(14/24): bind-license-9.11.36-3.el8.noarch.rpm 2.6 MB/s | 103 kB 00:00 -(15/24): bind-libs-lite-9.11.36-3.el8.x86_64.rp 6.8 MB/s | 1.2 MB 00:00 -(16/24): bind-utils-9.11.36-3.el8.x86_64.rpm 3.6 MB/s | 452 kB 00:00 -(17/24): zip-3.0-23.el8.x86_64.rpm 804 kB/s | 270 kB 00:00 -(18/24): libmaxminddb-1.2.0-10.el8.x86_64.rpm 529 kB/s | 33 kB 00:00 -(19/24): fstrm-0.6.1-2.el8.x86_64.rpm 161 kB/s | 29 kB 00:00 -(20/24): python3-bind-9.11.36-3.el8.noarch.rpm 2.0 MB/s | 150 kB 00:00 -(21/24): protobuf-c-1.3.0-6.el8.x86_64.rpm 351 kB/s | 37 kB 00:00 -(22/24): vim-minimal-8.0.1763-19.0.1.el8_6.4.x8 6.4 MB/s | 575 kB 00:00 -(23/24): wget-1.19.5-10.0.1.el8.x86_64.rpm 3.3 MB/s | 734 kB 00:00 -(24/24): openssl-libs-1.1.1k-7.el8_6.x86_64.rpm 6.8 MB/s | 1.5 MB 00:00 --------------------------------------------------------------------------------- -Total 3.3 MB/s | 9.7 MB 00:02 -Running transaction check -Transaction check succeeded. -Running transaction test -Transaction test succeeded. 
-Running transaction - Preparing : 1/1 - Upgrading : openssl-libs-1:1.1.1k-7.el8_6.x86_64 1/26 - Running scriptlet: openssl-libs-1:1.1.1k-7.el8_6.x86_64 1/26 - Installing : protobuf-c-1.3.0-6.el8.x86_64 2/26 - Installing : libmaxminddb-1.2.0-10.el8.x86_64 3/26 - Running scriptlet: libmaxminddb-1.2.0-10.el8.x86_64 3/26 - Installing : fstrm-0.6.1-2.el8.x86_64 4/26 - Installing : bind-license-32:9.11.36-3.el8.noarch 5/26 - Installing : bind-libs-lite-32:9.11.36-3.el8.x86_64 6/26 - Installing : bind-libs-32:9.11.36-3.el8.x86_64 7/26 - Upgrading : vim-minimal-2:8.0.1763-19.0.1.el8_6.4.x86_64 8/26 - Installing : unzip-6.0-46.0.1.el8.x86_64 9/26 - Installing : tcl-1:8.6.8-2.el8.x86_64 10/26 - Running scriptlet: tcl-1:8.6.8-2.el8.x86_64 10/26 - Installing : python3-ply-3.9-9.el8.noarch 11/26 - Installing : python3-bind-32:9.11.36-3.el8.noarch 12/26 - Installing : libmetalink-0.1.3-7.el8.x86_64 13/26 - Installing : wget-1.19.5-10.0.1.el8.x86_64 14/26 - Running scriptlet: wget-1.19.5-10.0.1.el8.x86_64 14/26 - Installing : bind-utils-32:9.11.36-3.el8.x86_64 15/26 - Installing : expect-5.45.4-5.el8.x86_64 16/26 - Installing : zip-3.0-23.el8.x86_64 17/26 - Installing : sudo-1.8.29-8.el8.x86_64 18/26 - Running scriptlet: sudo-1.8.29-8.el8.x86_64 18/26 - Installing : openssl-1:1.1.1k-7.el8_6.x86_64 19/26 - Installing : which-2.21-17.el8.x86_64 20/26 - Installing : tree-1.7.0-15.el8.x86_64 21/26 - Installing : tar-2:1.30-5.el8.x86_64 22/26 - Running scriptlet: tar-2:1.30-5.el8.x86_64 22/26 - Installing : net-tools-2.0-0.52.20160912git.el8.x86_64 23/26 - Running scriptlet: net-tools-2.0-0.52.20160912git.el8.x86_64 23/26 - Installing : hostname-3.20-6.el8.x86_64 24/26 - Running scriptlet: hostname-3.20-6.el8.x86_64 24/26 - Cleanup : vim-minimal-2:8.0.1763-19.0.1.el8_6.2.x86_64 25/26 - Cleanup : openssl-libs-1:1.1.1k-6.el8_5.x86_64 26/26 - Running scriptlet: openssl-libs-1:1.1.1k-6.el8_5.x86_64 26/26 - Verifying : expect-5.45.4-5.el8.x86_64 1/26 - Verifying : 
hostname-3.20-6.el8.x86_64 2/26 - Verifying : libmetalink-0.1.3-7.el8.x86_64 3/26 - Verifying : net-tools-2.0-0.52.20160912git.el8.x86_64 4/26 - Verifying : openssl-1:1.1.1k-7.el8_6.x86_64 5/26 - Verifying : python3-ply-3.9-9.el8.noarch 6/26 - Verifying : sudo-1.8.29-8.el8.x86_64 7/26 - Verifying : tar-2:1.30-5.el8.x86_64 8/26 - Verifying : tcl-1:8.6.8-2.el8.x86_64 9/26 - Verifying : tree-1.7.0-15.el8.x86_64 10/26 - Verifying : unzip-6.0-46.0.1.el8.x86_64 11/26 - Verifying : which-2.21-17.el8.x86_64 12/26 - Verifying : zip-3.0-23.el8.x86_64 13/26 - Verifying : bind-libs-32:9.11.36-3.el8.x86_64 14/26 - Verifying : bind-libs-lite-32:9.11.36-3.el8.x86_64 15/26 - Verifying : bind-license-32:9.11.36-3.el8.noarch 16/26 - Verifying : bind-utils-32:9.11.36-3.el8.x86_64 17/26 - Verifying : fstrm-0.6.1-2.el8.x86_64 18/26 - Verifying : libmaxminddb-1.2.0-10.el8.x86_64 19/26 - Verifying : protobuf-c-1.3.0-6.el8.x86_64 20/26 - Verifying : python3-bind-32:9.11.36-3.el8.noarch 21/26 - Verifying : wget-1.19.5-10.0.1.el8.x86_64 22/26 - Verifying : openssl-libs-1:1.1.1k-7.el8_6.x86_64 23/26 - Verifying : openssl-libs-1:1.1.1k-6.el8_5.x86_64 24/26 - Verifying : vim-minimal-2:8.0.1763-19.0.1.el8_6.4.x86_64 25/26 - Verifying : vim-minimal-2:8.0.1763-19.0.1.el8_6.2.x86_64 26/26 - -Upgraded: - openssl-libs-1:1.1.1k-7.el8_6.x86_64 - vim-minimal-2:8.0.1763-19.0.1.el8_6.4.x86_64 -Installed: - bind-libs-32:9.11.36-3.el8.x86_64 - bind-libs-lite-32:9.11.36-3.el8.x86_64 - bind-license-32:9.11.36-3.el8.noarch - bind-utils-32:9.11.36-3.el8.x86_64 - expect-5.45.4-5.el8.x86_64 - fstrm-0.6.1-2.el8.x86_64 - hostname-3.20-6.el8.x86_64 - libmaxminddb-1.2.0-10.el8.x86_64 - libmetalink-0.1.3-7.el8.x86_64 - net-tools-2.0-0.52.20160912git.el8.x86_64 - openssl-1:1.1.1k-7.el8_6.x86_64 - protobuf-c-1.3.0-6.el8.x86_64 - python3-bind-32:9.11.36-3.el8.noarch - python3-ply-3.9-9.el8.noarch - sudo-1.8.29-8.el8.x86_64 - tar-2:1.30-5.el8.x86_64 - tcl-1:8.6.8-2.el8.x86_64 - tree-1.7.0-15.el8.x86_64 - 
unzip-6.0-46.0.1.el8.x86_64 - wget-1.19.5-10.0.1.el8.x86_64 - which-2.21-17.el8.x86_64 - zip-3.0-23.el8.x86_64 - -Complete! -Adding repo from: http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 -created by dnf config-manager from http://yum.o 221 kB/s | 45 kB 00:00 -Dependencies resolved. -============================================================================================= - Package Arch Version Repository Size -============================================================================================= -Installing: - java-11-openjdk-devel x86_64 1:11.0.16.1.1-1.el8_6 ol8_appstream 3.4 M -Installing dependencies: - alsa-lib x86_64 1.2.6.1-3.el8 ol8_appstream 491 k - avahi-libs x86_64 0.7-20.el8 ol8_baseos_latest 62 k - copy-jdk-configs noarch 4.0-2.el8 ol8_appstream 30 k - crypto-policies-scripts noarch 20211116-1.gitae470d6.el8 ol8_baseos_latest 83 k - cups-libs x86_64 1:2.2.6-45.el8_6.2 ol8_baseos_latest 434 k - giflib x86_64 5.1.4-3.el8 ol8_appstream 51 k - graphite2 x86_64 1.3.10-10.el8 ol8_appstream 122 k - harfbuzz x86_64 1.7.5-3.el8 ol8_appstream 295 k - java-11-openjdk x86_64 1:11.0.16.1.1-1.el8_6 ol8_appstream 272 k - java-11-openjdk-headless x86_64 1:11.0.16.1.1-1.el8_6 ol8_appstream 40 M - javapackages-filesystem noarch 5.3.0-1.module+el8+5136+7ff78f74 ol8_appstream 30 k - lcms2 x86_64 2.9-2.el8 ol8_appstream 164 k - libX11 x86_64 1.6.8-5.el8 ol8_appstream 611 k - libX11-common noarch 1.6.8-5.el8 ol8_appstream 158 k - libXau x86_64 1.0.9-3.el8 ol8_appstream 37 k - libXcomposite x86_64 0.4.4-14.el8 ol8_appstream 28 k - libXext x86_64 1.3.4-1.el8 ol8_appstream 45 k - libXi x86_64 1.7.10-1.el8 ol8_appstream 49 k - libXrender x86_64 0.9.10-7.el8 ol8_appstream 33 k - libXtst x86_64 1.2.3-7.el8 ol8_appstream 22 k - libfontenc x86_64 1.1.3-8.el8 ol8_appstream 37 k - libjpeg-turbo x86_64 1.5.3-12.el8 ol8_appstream 157 k - libpkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 35 k - libxcb x86_64 1.13.1-1.el8 ol8_appstream 231 k - lksctp-tools 
x86_64 1.0.18-3.el8 ol8_baseos_latest 100 k - lua x86_64 5.3.4-12.el8 ol8_appstream 192 k - nspr x86_64 4.32.0-1.el8_4 ol8_appstream 142 k - nss x86_64 3.67.0-7.el8_5 ol8_appstream 741 k - nss-softokn x86_64 3.67.0-7.el8_5 ol8_appstream 487 k - nss-softokn-freebl x86_64 3.67.0-7.el8_5 ol8_appstream 395 k - nss-sysinit x86_64 3.67.0-7.el8_5 ol8_appstream 73 k - nss-util x86_64 3.67.0-7.el8_5 ol8_appstream 137 k - pkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 38 k - pkgconf-m4 noarch 1.4.2-1.el8 ol8_baseos_latest 17 k - pkgconf-pkg-config x86_64 1.4.2-1.el8 ol8_baseos_latest 15 k - ttmkfdir x86_64 3.0.9-54.el8 ol8_appstream 62 k - tzdata-java noarch 2022c-1.el8 ol8_appstream 186 k - xorg-x11-font-utils x86_64 1:7.5-41.el8 ol8_appstream 104 k - xorg-x11-fonts-Type1 noarch 7.5-19.el8 ol8_appstream 522 k -Enabling module streams: - javapackages-runtime 201801 - -Transaction Summary -============================================================================================= -Install 40 Packages - -Total download size: 50 M -Installed size: 194 M -Downloading Packages: -(1/40): crypto-policies-scripts-20211116-1.gita 1.3 MB/s | 83 kB 00:00 -(2/40): avahi-libs-0.7-20.el8.x86_64.rpm 952 kB/s | 62 kB 00:00 -(3/40): libpkgconf-1.4.2-1.el8.x86_64.rpm 2.2 MB/s | 35 kB 00:00 -(4/40): cups-libs-2.2.6-45.el8_6.2.x86_64.rpm 4.9 MB/s | 434 kB 00:00 -(5/40): lksctp-tools-1.0.18-3.el8.x86_64.rpm 3.9 MB/s | 100 kB 00:00 -(6/40): pkgconf-1.4.2-1.el8.x86_64.rpm 2.3 MB/s | 38 kB 00:00 -(7/40): pkgconf-m4-1.4.2-1.el8.noarch.rpm 1.1 MB/s | 17 kB 00:00 -(8/40): pkgconf-pkg-config-1.4.2-1.el8.x86_64.r 1.1 MB/s | 15 kB 00:00 -(9/40): copy-jdk-configs-4.0-2.el8.noarch.rpm 1.8 MB/s | 30 kB 00:00 -(10/40): giflib-5.1.4-3.el8.x86_64.rpm 3.0 MB/s | 51 kB 00:00 -(11/40): alsa-lib-1.2.6.1-3.el8.x86_64.rpm 12 MB/s | 491 kB 00:00 -(12/40): graphite2-1.3.10-10.el8.x86_64.rpm 5.9 MB/s | 122 kB 00:00 -(13/40): harfbuzz-1.7.5-3.el8.x86_64.rpm 13 MB/s | 295 kB 00:00 -(14/40): 
java-11-openjdk-11.0.16.1.1-1.el8_6.x8 15 MB/s | 272 kB 00:00 -(15/40): javapackages-filesystem-5.3.0-1.module 2.1 MB/s | 30 kB 00:00 -(16/40): lcms2-2.9-2.el8.x86_64.rpm 9.5 MB/s | 164 kB 00:00 -(17/40): libX11-1.6.8-5.el8.x86_64.rpm 24 MB/s | 611 kB 00:00 -(18/40): java-11-openjdk-devel-11.0.16.1.1-1.el 40 MB/s | 3.4 MB 00:00 -(19/40): libX11-common-1.6.8-5.el8.noarch.rpm 8.6 MB/s | 158 kB 00:00 -(20/40): libXau-1.0.9-3.el8.x86_64.rpm 2.6 MB/s | 37 kB 00:00 -(21/40): libXcomposite-0.4.4-14.el8.x86_64.rpm 2.2 MB/s | 28 kB 00:00 -(22/40): libXext-1.3.4-1.el8.x86_64.rpm 2.7 MB/s | 45 kB 00:00 -(23/40): libXi-1.7.10-1.el8.x86_64.rpm 2.8 MB/s | 49 kB 00:00 -(24/40): libXrender-0.9.10-7.el8.x86_64.rpm 2.4 MB/s | 33 kB 00:00 -(25/40): libXtst-1.2.3-7.el8.x86_64.rpm 1.6 MB/s | 22 kB 00:00 -(26/40): libfontenc-1.1.3-8.el8.x86_64.rpm 2.7 MB/s | 37 kB 00:00 -(27/40): libjpeg-turbo-1.5.3-12.el8.x86_64.rpm 9.6 MB/s | 157 kB 00:00 -(28/40): libxcb-1.13.1-1.el8.x86_64.rpm 13 MB/s | 231 kB 00:00 -(29/40): lua-5.3.4-12.el8.x86_64.rpm 11 MB/s | 192 kB 00:00 -(30/40): nspr-4.32.0-1.el8_4.x86_64.rpm 9.2 MB/s | 142 kB 00:00 -(31/40): nss-3.67.0-7.el8_5.x86_64.rpm 31 MB/s | 741 kB 00:00 -(32/40): nss-softokn-3.67.0-7.el8_5.x86_64.rpm 24 MB/s | 487 kB 00:00 -(33/40): nss-softokn-freebl-3.67.0-7.el8_5.x86_ 18 MB/s | 395 kB 00:00 -(34/40): nss-sysinit-3.67.0-7.el8_5.x86_64.rpm 4.3 MB/s | 73 kB 00:00 -(35/40): nss-util-3.67.0-7.el8_5.x86_64.rpm 8.7 MB/s | 137 kB 00:00 -(36/40): ttmkfdir-3.0.9-54.el8.x86_64.rpm 4.0 MB/s | 62 kB 00:00 -(37/40): tzdata-java-2022c-1.el8.noarch.rpm 12 MB/s | 186 kB 00:00 -(38/40): xorg-x11-font-utils-7.5-41.el8.x86_64. 6.0 MB/s | 104 kB 00:00 -(39/40): xorg-x11-fonts-Type1-7.5-19.el8.noarch 23 MB/s | 522 kB 00:00 -(40/40): java-11-openjdk-headless-11.0.16.1.1-1 73 MB/s | 40 MB 00:00 --------------------------------------------------------------------------------- -Total 71 MB/s | 50 MB 00:00 -Running transaction check -Transaction check succeeded. 
-Running transaction test -Transaction test succeeded. -Running transaction - Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 1/1 - Running scriptlet: java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86_6 1/1 - Preparing : 1/1 - Installing : nspr-4.32.0-1.el8_4.x86_64 1/40 - Running scriptlet: nspr-4.32.0-1.el8_4.x86_64 1/40 - Installing : nss-util-3.67.0-7.el8_5.x86_64 2/40 - Installing : libjpeg-turbo-1.5.3-12.el8.x86_64 3/40 - Installing : nss-softokn-freebl-3.67.0-7.el8_5.x86_64 4/40 - Installing : nss-softokn-3.67.0-7.el8_5.x86_64 5/40 - Installing : tzdata-java-2022c-1.el8.noarch 6/40 - Installing : ttmkfdir-3.0.9-54.el8.x86_64 7/40 - Installing : lua-5.3.4-12.el8.x86_64 8/40 - Installing : copy-jdk-configs-4.0-2.el8.noarch 9/40 - Installing : libfontenc-1.1.3-8.el8.x86_64 10/40 - Installing : libXau-1.0.9-3.el8.x86_64 11/40 - Installing : libxcb-1.13.1-1.el8.x86_64 12/40 - Installing : libX11-common-1.6.8-5.el8.noarch 13/40 - Installing : libX11-1.6.8-5.el8.x86_64 14/40 - Installing : libXext-1.3.4-1.el8.x86_64 15/40 - Installing : libXi-1.7.10-1.el8.x86_64 16/40 - Installing : libXtst-1.2.3-7.el8.x86_64 17/40 - Installing : libXcomposite-0.4.4-14.el8.x86_64 18/40 - Installing : libXrender-0.9.10-7.el8.x86_64 19/40 - Installing : lcms2-2.9-2.el8.x86_64 20/40 - Running scriptlet: lcms2-2.9-2.el8.x86_64 20/40 - Installing : javapackages-filesystem-5.3.0-1.module+el8+5136+7f 21/40 - Installing : graphite2-1.3.10-10.el8.x86_64 22/40 - Installing : harfbuzz-1.7.5-3.el8.x86_64 23/40 - Running scriptlet: harfbuzz-1.7.5-3.el8.x86_64 23/40 - Installing : giflib-5.1.4-3.el8.x86_64 24/40 - Installing : alsa-lib-1.2.6.1-3.el8.x86_64 25/40 - Running scriptlet: alsa-lib-1.2.6.1-3.el8.x86_64 25/40 - Installing : pkgconf-m4-1.4.2-1.el8.noarch 26/40 - Installing : lksctp-tools-1.0.18-3.el8.x86_64 27/40 - Running scriptlet: lksctp-tools-1.0.18-3.el8.x86_64 27/40 - Installing : libpkgconf-1.4.2-1.el8.x86_64 28/40 - Installing : pkgconf-1.4.2-1.el8.x86_64 29/40 - Installing 
: pkgconf-pkg-config-1.4.2-1.el8.x86_64 30/40 - Installing : xorg-x11-font-utils-1:7.5-41.el8.x86_64 31/40 - Installing : xorg-x11-fonts-Type1-7.5-19.el8.noarch 32/40 - Running scriptlet: xorg-x11-fonts-Type1-7.5-19.el8.noarch 32/40 - Installing : crypto-policies-scripts-20211116-1.gitae470d6.el8. 33/40 - Installing : nss-sysinit-3.67.0-7.el8_5.x86_64 34/40 - Installing : nss-3.67.0-7.el8_5.x86_64 35/40 - Installing : avahi-libs-0.7-20.el8.x86_64 36/40 - Installing : cups-libs-1:2.2.6-45.el8_6.2.x86_64 37/40 - Installing : java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86 38/40 - Running scriptlet: java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86 38/40 - Installing : java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 39/40 - Running scriptlet: java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 39/40 - Installing : java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 40/40 - Running scriptlet: java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 40/40 - Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 40/40 - Running scriptlet: crypto-policies-scripts-20211116-1.gitae470d6.el8. 40/40 - Running scriptlet: nss-3.67.0-7.el8_5.x86_64 40/40 - Running scriptlet: java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86 40/40 - Running scriptlet: java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 40/40 - Running scriptlet: java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 40/40 - Verifying : avahi-libs-0.7-20.el8.x86_64 1/40 - Verifying : crypto-policies-scripts-20211116-1.gitae470d6.el8. 
2/40 - Verifying : cups-libs-1:2.2.6-45.el8_6.2.x86_64 3/40 - Verifying : libpkgconf-1.4.2-1.el8.x86_64 4/40 - Verifying : lksctp-tools-1.0.18-3.el8.x86_64 5/40 - Verifying : pkgconf-1.4.2-1.el8.x86_64 6/40 - Verifying : pkgconf-m4-1.4.2-1.el8.noarch 7/40 - Verifying : pkgconf-pkg-config-1.4.2-1.el8.x86_64 8/40 - Verifying : alsa-lib-1.2.6.1-3.el8.x86_64 9/40 - Verifying : copy-jdk-configs-4.0-2.el8.noarch 10/40 - Verifying : giflib-5.1.4-3.el8.x86_64 11/40 - Verifying : graphite2-1.3.10-10.el8.x86_64 12/40 - Verifying : harfbuzz-1.7.5-3.el8.x86_64 13/40 - Verifying : java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 14/40 - Verifying : java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 15/40 - Verifying : java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86 16/40 - Verifying : javapackages-filesystem-5.3.0-1.module+el8+5136+7f 17/40 - Verifying : lcms2-2.9-2.el8.x86_64 18/40 - Verifying : libX11-1.6.8-5.el8.x86_64 19/40 - Verifying : libX11-common-1.6.8-5.el8.noarch 20/40 - Verifying : libXau-1.0.9-3.el8.x86_64 21/40 - Verifying : libXcomposite-0.4.4-14.el8.x86_64 22/40 - Verifying : libXext-1.3.4-1.el8.x86_64 23/40 - Verifying : libXi-1.7.10-1.el8.x86_64 24/40 - Verifying : libXrender-0.9.10-7.el8.x86_64 25/40 - Verifying : libXtst-1.2.3-7.el8.x86_64 26/40 - Verifying : libfontenc-1.1.3-8.el8.x86_64 27/40 - Verifying : libjpeg-turbo-1.5.3-12.el8.x86_64 28/40 - Verifying : libxcb-1.13.1-1.el8.x86_64 29/40 - Verifying : lua-5.3.4-12.el8.x86_64 30/40 - Verifying : nspr-4.32.0-1.el8_4.x86_64 31/40 - Verifying : nss-3.67.0-7.el8_5.x86_64 32/40 - Verifying : nss-softokn-3.67.0-7.el8_5.x86_64 33/40 - Verifying : nss-softokn-freebl-3.67.0-7.el8_5.x86_64 34/40 - Verifying : nss-sysinit-3.67.0-7.el8_5.x86_64 35/40 - Verifying : nss-util-3.67.0-7.el8_5.x86_64 36/40 - Verifying : ttmkfdir-3.0.9-54.el8.x86_64 37/40 - Verifying : tzdata-java-2022c-1.el8.noarch 38/40 - Verifying : xorg-x11-font-utils-1:7.5-41.el8.x86_64 39/40 - Verifying : xorg-x11-fonts-Type1-7.5-19.el8.noarch 40/40 
- -Installed: - alsa-lib-1.2.6.1-3.el8.x86_64 - avahi-libs-0.7-20.el8.x86_64 - copy-jdk-configs-4.0-2.el8.noarch - crypto-policies-scripts-20211116-1.gitae470d6.el8.noarch - cups-libs-1:2.2.6-45.el8_6.2.x86_64 - giflib-5.1.4-3.el8.x86_64 - graphite2-1.3.10-10.el8.x86_64 - harfbuzz-1.7.5-3.el8.x86_64 - java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 - java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 - java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86_64 - javapackages-filesystem-5.3.0-1.module+el8+5136+7ff78f74.noarch - lcms2-2.9-2.el8.x86_64 - libX11-1.6.8-5.el8.x86_64 - libX11-common-1.6.8-5.el8.noarch - libXau-1.0.9-3.el8.x86_64 - libXcomposite-0.4.4-14.el8.x86_64 - libXext-1.3.4-1.el8.x86_64 - libXi-1.7.10-1.el8.x86_64 - libXrender-0.9.10-7.el8.x86_64 - libXtst-1.2.3-7.el8.x86_64 - libfontenc-1.1.3-8.el8.x86_64 - libjpeg-turbo-1.5.3-12.el8.x86_64 - libpkgconf-1.4.2-1.el8.x86_64 - libxcb-1.13.1-1.el8.x86_64 - lksctp-tools-1.0.18-3.el8.x86_64 - lua-5.3.4-12.el8.x86_64 - nspr-4.32.0-1.el8_4.x86_64 - nss-3.67.0-7.el8_5.x86_64 - nss-softokn-3.67.0-7.el8_5.x86_64 - nss-softokn-freebl-3.67.0-7.el8_5.x86_64 - nss-sysinit-3.67.0-7.el8_5.x86_64 - nss-util-3.67.0-7.el8_5.x86_64 - pkgconf-1.4.2-1.el8.x86_64 - pkgconf-m4-1.4.2-1.el8.noarch - pkgconf-pkg-config-1.4.2-1.el8.x86_64 - ttmkfdir-3.0.9-54.el8.x86_64 - tzdata-java-2022c-1.el8.noarch - xorg-x11-font-utils-1:7.5-41.el8.x86_64 - xorg-x11-fonts-Type1-7.5-19.el8.noarch - -Complete! -Last metadata expiration check: 0:00:10 ago on Mon 12 Sep 2022 03:23:49 PM UTC. -Dependencies resolved. 
-============================================================================================== - Package - Arch Version Repository Size -============================================================================================== -Installing: - ords noarch 22.2.1-2.el8 yum.oracle.com_repo_OracleLinux_OL8_oracle_software_x86_64 83 M -Installing dependencies: - lsof x86_64 4.93.2-1.el8 ol8_baseos_latest 253 k - -Transaction Summary -============================================================================================== -Install 2 Packages - -Total download size: 83 M -Installed size: 87 M -Downloading Packages: -(1/2): lsof-4.93.2-1.el8.x86_64.rpm 3.0 MB/s | 253 kB 00:00 -(2/2): ords-22.2.1-2.el8.noarch.rpm 56 MB/s | 83 MB 00:01 --------------------------------------------------------------------------------- -Total 56 MB/s | 83 MB 00:01 -Running transaction check -Transaction check succeeded. -Running transaction test -Transaction test succeeded. -Running transaction - Preparing : 1/1 - Installing : lsof-4.93.2-1.el8.x86_64 1/2 - Running scriptlet: ords-22.2.1-2.el8.noarch 2/2 - Installing : ords-22.2.1-2.el8.noarch 2/2 - Running scriptlet: ords-22.2.1-2.el8.noarch 2/2 -INFO: Before starting ORDS service, run the below command as user oracle: - ords --config /etc/ords/config install - - Verifying : lsof-4.93.2-1.el8.x86_64 1/2 - Verifying : ords-22.2.1-2.el8.noarch 2/2 - -Installed: - lsof-4.93.2-1.el8.x86_64 ords-22.2.1-2.el8.noarch - -Complete! -Last metadata expiration check: 0:00:15 ago on Mon 12 Sep 2022 03:23:49 PM UTC. -Package iproute-5.15.0-4.el8.x86_64 is already installed. -Dependencies resolved. -Nothing to do. -Complete! 
-24 files removed -Removing intermediate container c08b8dac80a5 - ---> bb1a717f3e6e -Step 5/10 : RUN mkdir -p $ORDS_HOME/doc_root && mkdir -p $ORDS_HOME/error && mkdir -p $ORDS_HOME/secrets && chmod ug+x $ORDS_HOME/*.sh && groupadd -g 54322 dba && usermod -u 54321 -d /home/oracle -g dba -m -s /bin/bash oracle && chown -R oracle:dba $ORDS_HOME && echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers - ---> Running in 0103c070f4b6 -Removing intermediate container 0103c070f4b6 - ---> 089d06d9b198 -Step 6/10 : USER oracle - ---> Running in 51b1846c8c6f -Removing intermediate container 51b1846c8c6f - ---> 6c7b115954a4 -Step 7/10 : WORKDIR /home/oracle - ---> Running in 5862e2bc8df9 -Removing intermediate container 5862e2bc8df9 - ---> 28543543a88c -Step 8/10 : VOLUME ["$ORDS_HOME/config/ords"] - ---> Running in 465398d6f2bb -Removing intermediate container 465398d6f2bb - ---> 4037eb7f2f12 -Step 9/10 : EXPOSE 8888 - ---> Running in 2813ab5473f6 -Removing intermediate container 2813ab5473f6 - ---> 3410f1be2fff -Step 10/10 : CMD $ORDS_HOME/$RUN_FILE - ---> Running in 0a9a72408177 -Removing intermediate container 0a9a72408177 - ---> 2ef5dc95701b -Successfully built 2ef5dc95701b -Successfully tagged oracle/ords-dboper:22.2.1 - diff --git a/docs/multitenant/provisioning/ords_image.md b/docs/multitenant/provisioning/ords_image.md index 21abcbab..e2d1dcef 100644 --- a/docs/multitenant/provisioning/ords_image.md +++ b/docs/multitenant/provisioning/ords_image.md @@ -2,43 +2,60 @@ # Build ORDS Docker Image -In the below steps, we are building an ORDS Docker Image for ORDS Software. The image built can be later pushed to a local repository to be used later for a deployment. +This file contains the steps to create an ORDS based image to be used solely by the PDB life cycle multitentant controllers. 
**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. -1. Clone the software using git: +#### Clone the software using git: + +> Under directory ./oracle-database-operator/ords you will find the [Dockerfile](../../../ords/Dockerfile) and [runOrdsSSL.sh](../../../ords/runOrdsSSL.sh) required to build the image. + ```sh git clone git@orahub.oci.oraclecorp.com:rac-docker-dev/oracle-database-operator.git cd oracle-database-operator/ords/ ``` -2. Login to the registry: container-registry.oracle.com +#### Login to the registry: container-registry.oracle.com **NOTE:** To login to this registry, you will need to the URL https://container-registry.oracle.com , Sign in, then click on "Java" and then accept the agreement. ```bash docker login container-registry.oracle.com -``` +``` -3. Login to a repo where you want to push your docker image (if needed) to pull during deployment in your environment. +#### Login to the your container registry + +Login to a repo where you want to push your docker image (if needed) to pull during deployment in your environment. ```bash docker login ``` -4. Build the docker image by using below command: +#### Build the image + +Build the docker image by using below command: ```bash -docker build -t oracle/ords-dboper:ords-latest . +docker build -t oracle/ords-dboper:latest . ``` +> If your are working behind a proxy mind to specify https_proxy and http_proxy during image creation -5. Check the docker image details using: +Check the docker image details using: ```bash docker images ``` -6. Tag and push the image to your docker repository. +> OUTPUT EXAMPLE +```bash +REPOSITORY TAG IMAGE ID CREATED SIZE +oracle/ords-dboper latest fdb17aa242f8 4 hours ago 1.46GB + +``` + +#### Tag and push the image + +Tag and push the image to your image repository. 
NOTE: We have the repo as `phx.ocir.io//oracle/ords:latest`. Please change as per your environment. @@ -47,18 +64,18 @@ docker tag oracle/ords-dboper:ords-latest phx.ocir.io//oracle/ords:la docker push phx.ocir.io//oracle/ords:latest ``` -7. Verify the image pushed to your docker repository. - -You can refer to below sample output for above steps as well. +#### In case of private image -8. Create a Kubernetes Secret for your docker repository to pull the image during deployment using the below command: +If you the image not be public then yuo need to create a secret containing the password of your image repository. +Create a Kubernetes Secret for your docker repository to pull the image during deployment using the below command: ```bash kubectl create secret generic container-registry-secret --from-file=.dockerconfigjson=./.docker/config.json --type=kubernetes.io/dockerconfigjson -n oracle-database-operator-system ``` -This Kubernetes secret will be provided in the .yaml file against the parameter `ordsImagePullSecret` to pull the ORDS Docker Image from your docker repository (if its a private repository). 
+Use the parameter `ordsImagePullSecret` to specify the container secrets in pod creation yaml file + +#### [Image createion example](../usecase01/logfiles/BuildImage.log) -## Sample Output -[Here](./ords_image.log) is the sample output for docker image created for ORDS latest version + diff --git a/docs/multitenant/provisioning/pdb_crd_resource.md b/docs/multitenant/provisioning/pdb_crd_resource.md deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/multitenant/provisioning/plug_pdb.log b/docs/multitenant/provisioning/plug_pdb.log deleted file mode 100644 index 3a0be247..00000000 --- a/docs/multitenant/provisioning/plug_pdb.log +++ /dev/null @@ -1,100 +0,0 @@ --- Check the status of the PDB CRD Resources: - -% kubectl get pdbs -A -No resources found - - - --- Verify from the CDB: - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - - - --- Confirm the availability of the required .xml file: - -[oracle@goldhost1 ~]$ ls -lrt /tmp/pdbnewclone.xml --rw-r--r-- 1 oracle asmadmin 9920 Jun 27 06:26 /tmp/pdbnewclone.xml - - --- Use the below .yaml file for the plug in operation: - -% cat plug_pdb.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - xmlFileName: "/tmp/pdbnewclone.xml" - fileNameConversions: "NONE" - sourceFileNameConversions: "NONE" - copyAction: "MOVE" - totalSize: "1G" - tempSize: "100M" - action: "Plug" - - - --- Apply the .yaml file: - -% kubectl apply -f plug_pdb.yaml -pdb.database.oracle.com/pdb1 created - - --- Monitor the logs from the Oracle DB Operator Pod: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. -2022-06-27T04:28:36Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "UID": "bfad69af-36be-4792-87e3-639323300167", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T04:28:36Z INFO pdb-webhook ValidateCreate-Validating PDB spec for : pdb1 -2022-06-27T04:28:36Z INFO pdb-webhook validateCommon {"name": "pdb1"} -2022-06-27T04:28:36Z INFO pdb-webhook Valdiating PDB Resource Action : PLUG -2022-06-27T04:28:36Z INFO pdb-webhook PDB Resource : pdb1 successfully validated for Action : PLUG -2022-06-27T04:28:36Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "bfad69af-36be-4792-87e3-639323300167", "allowed": true} -2022-06-27T04:28:36Z INFO controllers.PDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/pdb1"} -2022-06-27T04:28:36Z INFO controllers.PDB Adding finalizer {"managePDBDeletion": "oracle-database-operator-system/pdb1"} -2022-06-27T04:28:36Z INFO controllers.PDB Found PDB: pdb1 {"checkDuplicatePDB": "oracle-database-operator-system/pdb1"} -2022-06-27T04:28:36Z INFO controllers.PDB Validating PDB 
phase for: pdb1 {"validatePhase": "oracle-database-operator-system/pdb1", "Action": "PLUG"} -2022-06-27T04:28:36Z INFO controllers.PDB Validation complete {"validatePhase": "oracle-database-operator-system/pdb1"} -2022-06-27T04:28:36Z INFO controllers.PDB PDB: {"onpremdboperator": "oracle-database-operator-system/pdb1", "Name": "pdb1", "Phase": "Plugging", "Status": "false"} -2022-06-27T04:28:36Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:28:36Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/", "Action": "POST"} -2022-06-27T04:28:36Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:29:07Z INFO controllers.PDB Successfully plugged PDB {"plugPDB": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew"} -2022-06-27T04:29:07Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:29:07Z DEBUG events Normal {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1","uid":"dd9bef3c-e493-4d5a-ae82-b24cbf5d0be3","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101465242"}, "reason": "Created", "message": "PDB 'pdbnew' plugged successfully"} -2022-06-27T04:29:07Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "GET"} -2022-06-27T04:29:07Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:29:07Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": 
"oracle-database-operator-system/pdb1", "PDB Name": "pdbnew", "State": "READ WRITE"} -2022-06-27T04:29:07Z INFO controllers.PDB Reconcile completed {"onpremdboperator": "oracle-database-operator-system/pdb1"} - - - --- Confirm the PDB CRD resource has been created and the PDB has been plugged in to the target CDB: - -% kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew READ WRITE 1G Ready Success - - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 4 PDBNEW READ WRITE NO diff --git a/docs/multitenant/provisioning/plug_pdb.md b/docs/multitenant/provisioning/plug_pdb.md deleted file mode 100644 index 02645f52..00000000 --- a/docs/multitenant/provisioning/plug_pdb.md +++ /dev/null @@ -1,44 +0,0 @@ -# Plug in a PDB using Oracle DB Operator On-Prem Controller in a target CDB - -In this use case, a PDB is plugged in using Oracle DB Operator On-Prem controller using an existing .xml file which was generated when the PDB was unplugged from this target CDB or another CDB. - -To plug in a PDB CRD Resource, a sample .yaml file is available here: [config/samples/onpremdb/pdb_plug.yaml](../../../config/samples/onpremdb/pdb_plug.yaml) - -**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. 
- -This example uses `plug_pdb.yaml` to plug in a PDB to a target CDB using Oracle DB Operator On-Prem Controller with: - -- Pluggable Database CRD Resource Name as `pdb1` -- Pluggable Database (PDB) Name as `pdbnew` -- Target CDB CRD Resource Name as `cdb-dev` -- CDB Name as `goldcdb` -- Action to be taken on the PDB as `Plug` -- XML metadata filename as `/tmp/pdbnewclone.xml` -- Source File Name Conversion as `NONE` -- File Name Conversion as `NONE` -- Copy Action as `MOVE` -- PDB Size as `1G` -- Temporary tablespace Size as `100M` - -**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) - -**NOTE:** Before performing the plug inoperation, you will first need to confirm the availability of the .xml file and the PDB datafiles. - -Use the file: [plug_pdb.yaml](./plug_pdb.yaml) for this use case as below: - -1. Deploy the .yaml file: -```sh -% kubectl apply -f plug_pdb.yaml -``` - -2. Monitor the Oracle DB Operator Pod for the progress of the PDB Unplug operation: - -NOTE: Check the DB Operator Pod name in your environment. - -```sh -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -``` - -## Sample Output - -[Here](./plug_pdb.log) is the sample output for a PDB created using Oracle DB Operator On-Prem Controller using file [plug_pdb.yaml](./plug_pdb.yaml) diff --git a/docs/multitenant/provisioning/plug_pdb.yaml b/docs/multitenant/provisioning/plug_pdb.yaml deleted file mode 100644 index a27d3255..00000000 --- a/docs/multitenant/provisioning/plug_pdb.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - xmlFileName: "/tmp/pdbnewclone.xml" - fileNameConversions: "NONE" - sourceFileNameConversions: "NONE" - copyAction: "MOVE" - totalSize: "1G" - tempSize: "100M" - action: "Plug" diff --git a/docs/multitenant/provisioning/quickOKEcreation.md b/docs/multitenant/provisioning/quickOKEcreation.md new file mode 100644 index 00000000..19d9323e --- /dev/null +++ b/docs/multitenant/provisioning/quickOKEcreation.md @@ -0,0 +1,136 @@ + + +### Quick Oke creation script + +Use this script to create quickly an OKE cluster in your OCI. + +#### Prerequisties: +- ocicli is properly configured on your client +- make is installed on your client +- vnc is already configured +- ssh key is configured (public key available under directory ~/.ssh) +- edit make providing all the information about your compartment, vnc,subnet,lb subnet and nd subnet (exported variables in the header section) + + +#### Execution: + +```bash +make all +``` + +Monitor the OKE from OCI console + +#### Makefile +```makefile +.EXPORT_ALL_VARIABLES: + +export CMPID=[.... COMPARTMENT ID.............] +export VNCID=[.... VNC ID ....................] +export ENDID=[.... SUBNET END POINT ID .......] +export LBSID=[.....LB SUBNET ID...............] +export NDSID=[.....NODE SUBNET ID.............] 
+ + +#ssh public key +export KEYFL=~/.ssh/id_rsa.pub + +#cluster version +export KSVER=v1.27.2 + +#cluster name +export CLUNM=myoke + +#pool name +export PLNAM=Pool1 + +#logfile +export LOGFILE=./clustoke.log + +#shape +export SHAPE=VM.Standard.E4.Flex + +OCI=/home/oracle/bin/oci +CUT=/usr/bin/cut +KUBECTL=/usr/bin/kubectl +CAT=/usr/bin/cat + +all: cluster waitcluster pool waitpool config desccluster + +cluster: + @echo " - CREATING CLUSTER " + @$(OCI) ce cluster create \ + --compartment-id $(CMPID) \ + --kubernetes-version $(KSVER) \ + --name $(CLUNM) \ + --vcn-id $(VNCID) \ + --endpoint-subnet-id $(ENDID) \ + --service-lb-subnet-ids '["'$(LBSID)'"]' \ + --endpoint-public-ip-enabled true \ + --persistent-volume-freeform-tags '{"$(CLUNM)" : "OKE"}' 1>$(LOGFILE) 2>&1 + +waitcluster: + @while [ `$(OCI) ce cluster list --compartment-id $(CMPID) \ + --name $(CLUNM) --lifecycle-state ACTIVE --query data[0].id \ + --raw-output |wc -l ` -eq 0 ] ; do sleep 5 ; done + @echo " - CLUSTER CREATED" + + +pool: + @echo " - CREATING POOL" + @$(eval PBKEY :=$(shell $(CAT) $(KEYFL)|grep -v " PUBLIC KEY")) + @$(OCI) ce node-pool create \ + --cluster-id `$(OCI) ce cluster list --compartment-id $(CMPID) \ + --name $(CLUNM) --lifecycle-state ACTIVE --query data[0].id --raw-output` \ + --compartment-id $(CMPID) \ + --kubernetes-version $(KSVER) \ + --name $(PLNAM) \ + --node-shape $(SHAPE) \ + --node-shape-config '{"memoryInGBs": 8.0, "ocpus": 1.0}' \ + --node-image-id `$(OCI) compute image list \ + --operating-system 'Oracle Linux' --operating-system-version 7.9 \ + --sort-by TIMECREATED --compartment-id $(CMPID) --shape $(SHAPE) \ + --query data[1].id --raw-output` \ + --node-boot-volume-size-in-gbs 50 \ + --ssh-public-key "$(PBKEY)" \ + --size 3 \ + --placement-configs '[{"availabilityDomain": "'`oci iam availability-domain list \ + --compartment-id $(CMPID) \ + --query data[0].name --raw-output`'", "subnetId": "'$(NDSID)'"}]' 1>>$(LOGFILE) 2>&1 + +waitpool: + $(eval CLSID :=$(shell 
$(OCI) ce cluster list --compartment-id $(CMPID) \ + --name $(CLUNM) --lifecycle-state ACTIVE --query data[0].id --raw-output)) + @while [ `$(OCI) ce node-pool list --compartment-id $(CMPID) \ + --lifecycle-state ACTIVE --cluster-id $(CLSID) \ + --query data[0].id --raw-output |wc -l ` -eq 0 ] ; do sleep 5 ; done + @sleep 10 + $(eval PLLID :=$(shell $(OCI) ce node-pool list --compartment-id $(CMPID) \ + --lifecycle-state ACTIVE --cluster-id $(CLSID) --query data[0].id --raw-output)) + @echo " - POOL CREATED" + +config: + @$(OCI) ce cluster create-kubeconfig --cluster-id \ + `$(OCI) ce cluster list \ + --compartment-id $(CMPID) --name $(CLUNM) --lifecycle-state ACTIVE \ + --query data[0].id --raw-output` \ + --file $(HOME)/.kube/config --region \ + `$(OCI) ce cluster list \ + --compartment-id $(CMPID) --name $(CLUNM) --lifecycle-state ACTIVE \ + --query data[0].id --raw-output|$(CUT) -f4 -d. ` \ + --token-version 2.0.0 --kube-endpoint PUBLIC_ENDPOINT + @echo " - KUBECTL PUBLIC ENDPOINT CONFIGURED" + + +desccluster: + @$(eval TMPSP := $(shell date "+%y/%m/%d:%H:%M" )) + $(KUBECTL) get nodes -o wide + $(KUBECTL) get storageclass + +checkvol: + $(OCI) bv volume list \ + --compartment-id $(CMPID) \ + --lifecycle-state AVAILABLE \ + --query 'data[?"freeform-tags".stackgres == '\''OKE'\''].id' +``` + + diff --git a/docs/multitenant/provisioning/singlenamespace/cdb_create.yaml b/docs/multitenant/provisioning/singlenamespace/cdb_create.yaml new file mode 100644 index 00000000..01fc0a18 --- /dev/null +++ b/docs/multitenant/provisioning/singlenamespace/cdb_create.yaml @@ -0,0 +1,44 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: CDB +metadata: + name: cdb-dev + namespace: oracle-database-operator-system +spec: + cdbName: "DB12" + ordsImage: ".............your registry............./ords-dboper:latest" + ordsImagePullPolicy: "Always" + dbTnsurl : "...Container tns alias....." 
+ replicas: 1 + sysAdminPwd: + secret: + secretName: "cdb1-secret" + key: "sysadmin_pwd" + ordsPwd: + secret: + secretName: "cdb1-secret" + key: "ords_pwd" + cdbAdminUser: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_user" + cdbAdminPwd: + secret: + secretName: "cdb1-secret" + key: "cdbadmin_pwd" + webServerUser: + secret: + secretName: "cdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "cdb1-secret" + key: "webserver_pwd" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + diff --git a/docs/multitenant/provisioning/singlenamespace/cdb_secret.yaml b/docs/multitenant/provisioning/singlenamespace/cdb_secret.yaml new file mode 100644 index 00000000..567b90a4 --- /dev/null +++ b/docs/multitenant/provisioning/singlenamespace/cdb_secret.yaml @@ -0,0 +1,17 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: v1 +kind: Secret +metadata: + name: cdb1-secret + namespace: oracle-database-operator-system +type: Opaque +data: + ords_pwd: ".....base64 encoded password...." + sysadmin_pwd: ".....base64 encoded password...." + cdbadmin_user: ".....base64 encoded password...." + cdbadmin_pwd: ".....base64 encoded password...." + webserver_user: ".....base64 encoded password...." + webserver_pwd: ".....base64 encoded password...." diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_clone.yaml b/docs/multitenant/provisioning/singlenamespace/pdb_clone.yaml new file mode 100644 index 00000000..0ecc3c70 --- /dev/null +++ b/docs/multitenant/provisioning/singlenamespace/pdb_clone.yaml @@ -0,0 +1,50 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdb2_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + action: "Clone" diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_close.yaml b/docs/multitenant/provisioning/singlenamespace/pdb_close.yaml new file mode 100644 index 00000000..5917d33a --- /dev/null +++ b/docs/multitenant/provisioning/singlenamespace/pdb_close.yaml @@ -0,0 +1,44 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + 
action: "Modify" + diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_create.yaml b/docs/multitenant/provisioning/singlenamespace/pdb_create.yaml new file mode 100644 index 00000000..be3581ad --- /dev/null +++ b/docs/multitenant/provisioning/singlenamespace/pdb_create.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + fileNameConversions: "NONE" + tdeImport: false + totalSize: "1G" + tempSize: "100M" + action: "Create" + assertivePdbDeletion: true + diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_delete.yaml b/docs/multitenant/provisioning/singlenamespace/pdb_delete.yaml new file mode 100644 index 00000000..c22b546a --- /dev/null +++ b/docs/multitenant/provisioning/singlenamespace/pdb_delete.yaml @@ -0,0 +1,34 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: 
"pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_open.yaml b/docs/multitenant/provisioning/singlenamespace/pdb_open.yaml new file mode 100644 index 00000000..25fdccc4 --- /dev/null +++ b/docs/multitenant/provisioning/singlenamespace/pdb_open.yaml @@ -0,0 +1,43 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + adminName: + secret: + secretName: "pdb1-secret" + key: "sysadmin_user" + adminPwd: + secret: + secretName: "pdb1-secret" + key: "sysadmin_pwd" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_plug.yaml b/docs/multitenant/provisioning/singlenamespace/pdb_plug.yaml new file mode 100644 index 00000000..77c00b9c --- /dev/null +++ b/docs/multitenant/provisioning/singlenamespace/pdb_plug.yaml @@ -0,0 +1,46 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + action: "Plug" + assertivePdbDeletion: true + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + + diff --git a/docs/multitenant/provisioning/pdb_secret.yaml b/docs/multitenant/provisioning/singlenamespace/pdb_secret.yaml similarity index 54% rename from docs/multitenant/provisioning/pdb_secret.yaml rename to docs/multitenant/provisioning/singlenamespace/pdb_secret.yaml index cf385dda..60d95d76 100644 --- a/docs/multitenant/provisioning/pdb_secret.yaml +++ b/docs/multitenant/provisioning/singlenamespace/pdb_secret.yaml @@ -7,7 +7,10 @@ kind: Secret metadata: name: pdb1-secret namespace: oracle-database-operator-system -type: Opaque -data: - sysadmin_user: "[ base64 encode values ]" - sysadmin_pwd: "[base64 encode values ]" +type: Opaque +data: + sysadmin_user: ".....base64 encoded password...." + sysadmin_pwd: ".....base64 encoded password...." + webserver_user: ".....base64 encoded password...." + webserver_pwd: ".....base64 encoded password...." 
+ diff --git a/docs/multitenant/provisioning/pdb.yaml b/docs/multitenant/provisioning/singlenamespace/pdb_unplug.yaml similarity index 50% rename from docs/multitenant/provisioning/pdb.yaml rename to docs/multitenant/provisioning/singlenamespace/pdb_unplug.yaml index 82941185..085d337e 100644 --- a/docs/multitenant/provisioning/pdb.yaml +++ b/docs/multitenant/provisioning/singlenamespace/pdb_unplug.yaml @@ -11,17 +11,29 @@ metadata: cdb: cdb-dev spec: cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - adminName: + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: secret: secretName: "pdb1-secret" - key: "sysadmin_user" - adminPwd: + key: "webserver_user" + webServerPwd: secret: secretName: "pdb1-secret" - key: "sysadmin_pwd" - fileNameConversions: "NONE" - totalSize: "1G" - tempSize: "100M" - action: "Create" + key: "webserver_pwd" + diff --git a/docs/multitenant/provisioning/unplug_pdb.log b/docs/multitenant/provisioning/unplug_pdb.log deleted file mode 100644 index c0995f83..00000000 --- a/docs/multitenant/provisioning/unplug_pdb.log +++ /dev/null @@ -1,165 +0,0 @@ --- Check the status of the PDB CRD resource: - -% kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew READ WRITE 1G Ready Success - - --- Verify the status from the CDB: - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW READ WRITE NO - - --- Use the below .yaml file to close the PDB: - -% cat modify_pdb_close.yaml -# -# 
Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - pdbState: "CLOSE" - modifyOption: "IMMEDIATE" - action: "Modify" - - --- Apply the .yaml file: - -% kubectl apply -f modify_pdb_close.yaml -pdb.database.oracle.com/pdb1 configured - - --- Monitor the Oracle DB Operator Pod logs: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. -2022-06-27T04:25:00Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "UID": "1623a6e2-d7dc-4b0f-8aa8-efada76cac13", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T04:25:00Z INFO pdb-webhook Setting default values in PDB spec for : pdb1 -2022-06-27T04:25:00Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "1623a6e2-d7dc-4b0f-8aa8-efada76cac13", "allowed": true} -2022-06-27T04:25:00Z INFO controllers.PDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/pdb1"} -2022-06-27T04:25:00Z INFO controllers.PDB Validating PDB phase for: pdb1 {"validatePhase": "oracle-database-operator-system/pdb1", "Action": "MODIFY"} -2022-06-27T04:25:00Z INFO controllers.PDB Validation complete {"validatePhase": "oracle-database-operator-system/pdb1"} -2022-06-27T04:25:00Z INFO controllers.PDB PDB: {"onpremdboperator": "oracle-database-operator-system/pdb1", "Name": "pdb1", "Phase": "Modifying", "Status": "false"} -2022-06-27T04:25:00Z INFO controllers.PDB Found CR 
for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:25:00Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "GET"} -2022-06-27T04:25:00Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:25:00Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew", "State": "READ WRITE"} -2022-06-27T04:25:00Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:25:01Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "POST"} -2022-06-27T04:25:01Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:25:02Z INFO controllers.PDB Successfully modified PDB state {"modifyPDB": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew"} -2022-06-27T04:25:02Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:25:02Z DEBUG events Normal {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1","uid":"447346c7-cfb0-43ed-abb2-a0fac844a3e4","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101464133"}, "reason": "Modified", "message": "PDB 'pdbnew' modified successfully"} -2022-06-27T04:25:02Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": 
"http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/status", "Action": "GET"} -2022-06-27T04:25:02Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:25:02Z INFO controllers.PDB Successfully obtained PDB state {"getPDBState": "oracle-database-operator-system/pdb1", "PDB Name": "pdbnew", "State": "MOUNTED"} -2022-06-27T04:25:02Z INFO controllers.PDB Reconcile completed {"onpremdboperator": "oracle-database-operator-system/pdb1"} - - - --- Confirm the PDB is now in MOUNT status: - -% kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB STATE PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 goldhost-scan.lbsub52b3b1cae.okecluster.oraclevcn.com:1521/pdbnew goldcdb pdbnew MOUNTED 1G Ready Success - - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - 3 PDBNEW MOUNTED - - - - --- Use the below .yaml file to unplug the PDB: - -% cat unplug_pdb.yaml -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - xmlFileName: "/tmp/pdbnewclone.xml" - action: "Unplug" - - --- Apply the .yaml file: - -% kubectl apply -f unplug_pdb.yaml -pdb.database.oracle.com/pdb1 configured - - --- Monitor the Oracle DB Operator Pod logs: - -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -. -. 
-2022-06-27T04:26:10Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "UID": "0f292426-8839-46b6-ba30-b3ffeee7e644", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T04:26:10Z INFO pdb-webhook Setting default values in PDB spec for : pdb1 -2022-06-27T04:26:10Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/mutate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "0f292426-8839-46b6-ba30-b3ffeee7e644", "allowed": true} -2022-06-27T04:26:10Z INFO controllers.PDB Reconcile requested {"onpremdboperator": "oracle-database-operator-system/pdb1"} -2022-06-27T04:26:10Z INFO controllers.PDB Validating PDB phase for: pdb1 {"validatePhase": "oracle-database-operator-system/pdb1", "Action": "UNPLUG"} -2022-06-27T04:26:10Z INFO controllers.PDB Validation complete {"validatePhase": "oracle-database-operator-system/pdb1"} -2022-06-27T04:26:10Z INFO controllers.PDB PDB: {"onpremdboperator": "oracle-database-operator-system/pdb1", "Name": "pdb1", "Phase": "Unplugging", "Status": "false"} -2022-06-27T04:26:10Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:26:10Z INFO controllers.PDB Issuing REST call {"callAPI": "oracle-database-operator-system/pdb1", "URL": "http://cdb-dev-ords:8888/ords/_/db-api/latest/database/pdbs/pdbnew/", "Action": "POST"} -2022-06-27T04:26:10Z INFO controllers.PDB Found CR for CDB {"getCDBResource": "oracle-database-operator-system/pdb1", "Name": "cdb-dev", "CR Name": "cdb-dev"} -2022-06-27T04:26:18Z INFO controllers.PDB Removing finalizer {"unplugPDB": "oracle-database-operator-system/pdb1"} -2022-06-27T04:26:19Z DEBUG controller-runtime.webhook.webhooks received request {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "UID": 
"ce9e1a52-a372-4e3b-b148-c1e31bcb26f8", "kind": "database.oracle.com/v1alpha1, Kind=PDB", "resource": {"group":"database.oracle.com","version":"v1alpha1","resource":"pdbs"}} -2022-06-27T04:26:19Z INFO pdb-webhook ValidateUpdate-Validating PDB spec for : pdb1 -2022-06-27T04:26:19Z INFO pdb-webhook validateCommon {"name": "pdb1"} -2022-06-27T04:26:19Z INFO pdb-webhook Valdiating PDB Resource Action : UNPLUG -2022-06-27T04:26:19Z DEBUG controller-runtime.webhook.webhooks wrote response {"webhook": "/validate-database-oracle-com-v1alpha1-pdb", "code": 200, "reason": "", "UID": "ce9e1a52-a372-4e3b-b148-c1e31bcb26f8", "allowed": true} -2022-06-27T04:26:19Z INFO controllers.PDB Successfully unplugged PDB resource {"unplugPDB": "oracle-database-operator-system/pdb1"} -2022-06-27T04:26:19Z INFO controllers.PDB Reconcile completed {"onpremdboperator": "oracle-database-operator-system/pdb1"} -2022-06-27T04:26:19Z DEBUG events Normal {"object": {"kind":"PDB","namespace":"oracle-database-operator-system","name":"pdb1","uid":"447346c7-cfb0-43ed-abb2-a0fac844a3e4","apiVersion":"database.oracle.com/v1alpha1","resourceVersion":"101464533"}, "reason": "Unplugged", "message": "PDB 'pdbnew' unplugged successfully"} - - - --- Confirm the PDB has been unplugged: - -% kubectl get pdbs -A -No resources found - - -SQL> show pdbs - - CON_ID CON_NAME OPEN MODE RESTRICTED ----------- ------------------------------ ---------- ---------- - 2 PDB$SEED READ WRITE NO - - - --- Confirm the .xml file generated in the CDB host: - -[oracle@goldhost1 ~]$ ls -lrt /tmp/pdbnewclone.xml --rw-r--r-- 1 oracle asmadmin 9920 Jun 27 06:26 /tmp/pdbnewclone.xml diff --git a/docs/multitenant/provisioning/unplug_pdb.md b/docs/multitenant/provisioning/unplug_pdb.md deleted file mode 100644 index fb98fc8b..00000000 --- a/docs/multitenant/provisioning/unplug_pdb.md +++ /dev/null @@ -1,39 +0,0 @@ -# Unplug a PDB using Oracle DB Operator On-Prem Controller in a target CDB - -In this use case, a PDB is unplugged using 
Oracle DB Operator On-Prem controller. - -To unplug a PDB CRD Resource, a sample .yaml file is available here: [config/samples/onpremdb/pdb_unplug.yaml](../../../config/samples/onpremdb/pdb_unplug.yaml) - -**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. - -This example uses `unplug_pdb.yaml` to unplug a PDB from a target CDB using Oracle DB Operator On-Prem Controller with: - -- Pluggable Database CRD Resource Name as `pdb1` -- Pluggable Database (PDB) Name as `pdbnew` -- Target CDB CRD Resource Name as `cdb-dev` -- CDB Name as `goldcdb` -- Action to be taken on the PDB as `Unplug` -- XML metadata filename as `/tmp/pdbnewclone.xml` - -**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) - -**NOTE:** Before performing the unplug operation on the PDB CRD Resource, you will first need to perform the Modify Operation on that PDB CRD resource to Close the the PDB. After that you will be able to perform the Unplug operation. Please refer to the use case to modify the PDB state to Close. - -Use the file: [unplug_pdb.yaml](./unplug_pdb.yaml) for this use case as below: - -1. Deploy the .yaml file: -```sh -% kubectl apply -f unplug_pdb.yaml -``` - -2. Monitor the Oracle DB Operator Pod for the progress of the PDB Unplug operation: - -NOTE: Check the DB Operator Pod name in your environment. 
- -```sh -% kubectl logs -f pod/oracle-database-operator-controller-manager-76cb674c5c-f9wsd -n oracle-database-operator-system -``` - -## Sample Output - -[Here](./unplug_pdb.log) is the sample output for a PDB created using Oracle DB Operator On-Prem Controller using file [unplug_pdb.yaml](./unplug_pdb.yaml) diff --git a/docs/multitenant/provisioning/unplug_pdb.yaml b/docs/multitenant/provisioning/unplug_pdb.yaml deleted file mode 100644 index c9915b28..00000000 --- a/docs/multitenant/provisioning/unplug_pdb.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 -kind: PDB -metadata: - name: pdb1 - namespace: oracle-database-operator-system - labels: - cdb: cdb-dev -spec: - cdbResName: "cdb-dev" - cdbName: "goldcdb" - pdbName: "pdbnew" - xmlFileName: "/tmp/pdbnewclone.xml" - action: "Unplug" diff --git a/docs/multitenant/provisioning/validation_error.md b/docs/multitenant/provisioning/validation_error.md deleted file mode 100644 index ec527cdb..00000000 --- a/docs/multitenant/provisioning/validation_error.md +++ /dev/null @@ -1,73 +0,0 @@ -#Validation and Errors - -## Kubernetes Events -You can check Kubernetes events for any errors or status updates as shown below: -```sh -$ kubectl get events -A -NAMESPACE LAST SEEN TYPE REASON OBJECT MESSAGE -oracle-database-operator-system 58m Warning Failed pod/cdb-dev-ords-qiigr Error: secret "cdb1-secret" not found -oracle-database-operator-system 56m Normal DeletedORDSPod cdb/cdb-dev Deleted ORDS Pod(s) for cdb-dev -oracle-database-operator-system 56m Normal DeletedORDSService cdb/cdb-dev Deleted ORDS Service for cdb-dev -... -oracle-database-operator-system 26m Warning OraError pdb/pdb1 ORA-65016: FILE_NAME_CONVERT must be specified... 
-oracle-database-operator-system 24m Warning OraError pdb/pdb2 ORA-65011: Pluggable database DEMOTEST does not exist. -... -oracle-database-operator-system 20m Normal Created pdb/pdb1 PDB 'demotest' created successfully -... -oracle-database-operator-system 17m Warning OraError pdb/pdb3 ORA-65012: Pluggable database DEMOTEST already exists... -``` - -In case of successfull operation, you can see messages like below: - -```sh -% kubectl get events -A -NAMESPACE LAST SEEN TYPE REASON OBJECT MESSAGE -kube-system 33s Warning BackOff pod/kube-apiserver Back-off restarting failed container -oracle-database-operator-system 59m Normal CreatedORDSService cdb/cdb-dev Created ORDS Service for cdb-dev -oracle-database-operator-system 51m Normal Created pdb/pdb1-clone PDB 'pdbnewclone' cloned successfully -oracle-database-operator-system 49m Normal Modified pdb/pdb1-clone PDB 'pdbnewclone' modified successfully -oracle-database-operator-system 47m Normal Deleted pdb/pdb1-clone PDB 'pdbnewclone' dropped successfully -oracle-database-operator-system 53m Normal Created pdb/pdb1 PDB 'pdbnew' created successfully -oracle-database-operator-system 44m Normal Modified pdb/pdb1 PDB 'pdbnew' modified successfully -oracle-database-operator-system 42m Normal Unplugged pdb/pdb1 PDB 'pdbnew' unplugged successfully -oracle-database-operator-system 39m Normal Created pdb/pdb1 PDB 'pdbnew' plugged successfully -``` - -## CDB Validation and Errors - -Validation is done at the time of CDB resource creation as shown below: -```sh -$ kubectl apply -f cdb1.yaml -The PDB "cdb-dev" is invalid: -* spec.dbServer: Required value: Please specify Database Server Name or IP Address -* spec.dbPort: Required value: Please specify DB Server Port -* spec.ordsImage: Required value: Please specify name of ORDS Image to be used -``` - -Apart from events, listing of CDBs will also show the possible reasons why a particular CDB CR could not be created as shown below: -```sh - $ kubectl get cdbs -A - - NAMESPACE NAME 
CDB NAME DB SERVER DB PORT SCAN NAME STATUS MESSAGE - oracle-database-operator-system cdb-dev devdb 172.17.0.4 1521 devdb Failed Secret not found:cdb1-secret -``` - -## PDB Validation and Errors - -Validation is done at the time of PDB resource creation as shown below: -```sh -$ kubectl apply -f pdb1.yaml -The PDB "pdb1" is invalid: -* spec.cdbResName: Required value: Please specify the name of the CDB Kubernetes resource to use for PDB operations -* spec.pdbName: Required value: Please specify name of the PDB to be created -* spec.adminPwd: Required value: Please specify PDB System Administrator Password -* spec.fileNameConversions: Required value: Please specify a value for fileNameConversions. Values can be a filename convert pattern or NONE -``` - -Similarly, for PDBs, listing of PDBs will also show the possible reasons why a particular PDB CR could not be created as shown below: -```sh -$ kubectl get pdbs -A -NAMESPACE NAME CONNECT STRING CDB NAME PDB NAME PDB SIZE STATUS MESSAGE -oracle-database-operator-system pdb1 democdb demotest1 Failed Secret not found:pdb12-secret -oracle-database-operator-system pdb2 democdb demotest2 Failed ORA-65016: FILE_NAME_CONVERT must be specified... 
-``` diff --git a/docs/multitenant/usecase01/README.md b/docs/multitenant/usecase01/README.md index e8109110..7352257e 100644 --- a/docs/multitenant/usecase01/README.md +++ b/docs/multitenant/usecase01/README.md @@ -3,20 +3,23 @@ # STEP BY STEP USE CASE -- [INTRODUCTION](#introduction) -- [OPERATION STEPS ](#operation-steps) -- [Download latest version from github ](#download-latest-version-from-orahub-a-namedownloada) -- [Upload webhook certificates](#upload-webhook-certificates-a-namewebhooka) -- [Create the dboperator](#create-the-dboperator-a-namedboperatora) -- [Create Secret for container registry](#create-secret-for-container-registry) -- [Build ords immage ](#build-ords-immage-a-nameordsimagea) -- [Database Configuration](#database-configuration) -- [Create CDB secret ](#create-cdb-secret) -- [Create Certificates](#create-certificates) -- [Apply cdb.yaml](#apply-cdbyaml) -- [Logs and throuble shutting](#cdb---logs-and-throuble-shutting) -- [Create PDB secret](#create-pdb-secret) -- [Other action ](#other-actions) +- [STEP BY STEP USE CASE](#step-by-step-use-case) + - [INTRODUCTION](#introduction) + - [OPERATIONAL STEPS](#operational-steps) + - [Download latest version from github ](#download-latest-version-from-github-) + - [Upload webhook certificates ](#upload-webhook-certificates-) + - [Create the dboperator ](#create-the-dboperator-) + - [Create secret for container registry](#create-secret-for-container-registry) + - [Build ords immage ](#build-ords-immage-) + - [Database Configuration](#database-configuration) + - [Create CDB secret](#create-cdb-secret) + - [Create Certificates](#create-certificates) + - [Apply cdb.yaml](#apply-cdbyaml) + - [CDB - Logs and throuble shutting](#cdb---logs-and-throuble-shutting) + - [Create PDB secret](#create-pdb-secret) + - [Apply pdb yaml file to create pdb](#apply-pdb-yaml-file-to-create-pdb) + - [Other actions](#other-actions) + - [Imperative approach on pdb deletion - will be avilable in 1.2.0 
](#imperative-approach-on-pdb-deletion) @@ -47,6 +50,7 @@ The following table reports the parameters required to configure and use oracle | pdbTlsKey | | [standalone.https.cert.key][key] | | pdbTlsCrt | | [standalone.https.cert][cr] | | pdbTlsCat | | certificate authority | +| assertivePdbDeletion | boolean | [turn on imperative approach on crd deleteion][imperative] | > A [makfile](./makefile) is available to sped up the command execution for the multitenant setup and test. See the comments in the header of file @@ -78,6 +82,7 @@ make operator-yaml IMG=operator:latest > **NOTE:** If you are using oracle-container-registry make sure to accept the license agreement otherwise the operator image pull fails. ---- + #### Upload webhook certificates ```bash @@ -101,6 +106,7 @@ oracle-database-operator-controller-manager-557ff6c659-xpswv 1/1 Running ``` ---- + #### Create secret for container registry + Make sure to login to your container registry and then create the secret for you container registry. @@ -119,6 +125,7 @@ container-registry-secret kubernetes.io/dockerconfigjson 1 19s webhook-server-cert kubernetes.io/tls ``` ---- + #### Build ords immage + Build the ords image, downloading ords software is no longer needed; just build the image and push it to your repository @@ -128,16 +135,17 @@ cd oracle-database-operator/ords docker build -t oracle/ords-dboper:latest . ``` -[example of execution](./BuildImage.log) +[Example of execution](./logfiles/BuildImage.log) + Login to your container registry and push the ords image. 
```bash docker tag /ords-dboper:latest docker push /ords-dboper:latest ``` -[example of execution](./ImagePush.log) +[Example of execution](./logfiles/tagandpush.log) ---- + #### Database Configuration + Configure Database @@ -153,6 +161,7 @@ GRANT SYSDBA TO CONTAINER = ALL; GRANT CREATE SESSION TO CONTAINER = ALL; ``` ---- + #### Create CDB secret + Create secret for CDB connection @@ -208,6 +217,7 @@ webhook-server-cert kubernetes.io/tls 3 4m55s >**TIPS:** Use the following commands to analyze contents of an existing secret ```bash kubectl get secret -o yaml -n ``` ---- + #### Create Certificates + Create certificates: At this stage we need to create certificates on our local machine and upload into kubernetes cluster by creating new secrets. @@ -258,10 +268,11 @@ kubectl create secret generic db-ca --from-file= -n oracle-database-op ``` -[example of execution:](./openssl_execution.log) +[Example of execution:](./logfiles/openssl_execution.log) ---- + #### Apply cdb.yaml @@ -272,56 +283,54 @@ kubectl create secret generic db-ca --from-file= -n oracle-database-op + Create ords container ```bash -/usr/bin/kubectl apply -f cdb.yaml -n oracle-database-operator-system +/usr/bin/kubectl apply -f cdb_create.yaml -n oracle-database-operator-system ``` -Example: **cdb.yaml** +Example: **cdb_create.yaml** ```yaml apiVersion: database.oracle.com/v1alpha1 -kind: CDB -metadata: - name: +kind: CDB +metadata: + name: cdb-dev namespace: oracle-database-operator-system spec: - cdbName: "" - dbServer: "" or - dbPort: - ordsImage: "/ords-dboper:.latest" + cdbName: "DB12" + ordsImage: ".............your registry............./ords-dboper:latest" ordsImagePullPolicy: "Always" - serviceName: + dbTnsurl : "...Container tns alias....." 
replicas: 1 - sysAdminPwd: - secret: + sysAdminPwd: + secret: secretName: "cdb1-secret" key: "sysadmin_pwd" ordsPwd: - secret: + secret: secretName: "cdb1-secret" - key: "ords_pwd" - cdbAdminUser: - secret: + key: "ords_pwd" + cdbAdminUser: + secret: secretName: "cdb1-secret" key: "cdbadmin_user" - cdbAdminPwd: - secret: + cdbAdminPwd: + secret: secretName: "cdb1-secret" key: "cdbadmin_pwd" - webServerUser: - secret: + webServerUser: + secret: secretName: "cdb1-secret" key: "webserver_user" - webServerPwd: - secret: + webServerPwd: + secret: secretName: "cdb1-secret" - key: "webserver_pwd" + key: "webserver_pwd" cdbTlsKey: secret: secretName: "db-tls" - key: "" + key: "tls.key" cdbTlsCrt: secret: secretName: "db-tls" - key: ":" + key: "tls.crt" ``` > **Note** if you are working in dataguard environment with multiple sites (AC/DR) specifying the host name (dbServer/dbPort/serviceName) may not be the suitable solution for this kind of configuration, use **dbTnsurl** instead. Specify the whole tns string which includes the hosts/scan list. @@ -342,9 +351,11 @@ spec: dbtnsurl:((DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(TRANS...... ``` -[example of cdb.yaml](./cdb.yaml) +[Example of cdb.yaml](./cdb_create.yaml) + ---- + #### CDB - Logs and throuble shutting + Check the status of ords container @@ -379,14 +390,14 @@ NAME CDB NAME DB SERVER DB PORT REPLICAS STATUS MESSAG ```bash /usr/bin/kubectl logs `/usr/bin/kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system ``` -[example of execution](./cdb.log) +[Example of cdb creation log](./logfiles/cdb_creation.log) + Test REST API from the pod. 
By querying the metadata catalog you can verify the status of https setting ```bash /usr/bin/kubectl exec -it `/usr/bin/kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system -i -t -- /usr/bin/curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ ``` -[example of execution](./testapi.log) +[Example of execution](./logfiles/testapi.log) + Verify the pod environment varaibles ```bash @@ -403,9 +414,10 @@ NAME CDB NAME DB SERVER DB PORT REPLICAS STATUS MESSAG ```bash /usr/bin/kubectl exec -it `/usr/bin/kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system -i -t -- /usr/local/bin/ords --config /etc/ords/config config list ``` -[Example of executions](./ordsconfig.log) +[Example of executions](./logfiles/ordsconfig.log) ----- + #### Create PDB secret @@ -439,30 +451,32 @@ pdb1-secret Opaque 2 79m <--- webhook-server-cert kubernetes.io/tls 3 79m ``` --- + #### Apply pdb yaml file to create pdb ```bash /usr/bin/kubectl apply -f pdb.yaml -n oracle-database-operator-system ``` -Example: **pdb.yaml** +Example: **pdb_create.yaml** ```yaml apiVersion: database.oracle.com/v1alpha1 kind: PDB metadata: - name: + name: pdb1 namespace: oracle-database-operator-system labels: - cdb: + cdb: cdb-dev spec: - cdbResName: "" - cdbName: "" - pdbName: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" adminName: secret: secretName: "pdb1-secret" - key: "sysadmin_user" + key: "sysadmin_user" adminPwd: secret: secretName: "pdb1-secret" @@ -470,19 +484,29 @@ spec: pdbTlsKey: secret: secretName: "db-tls" - key: "" + key: "tls.key" pdbTlsCrt: secret: secretName: "db-tls" - key: "" + key: "tls.crt" pdbTlsCat: secret: secretName: "db-ca" - key: "" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: 
"pdb1-secret" + key: "webserver_pwd" fileNameConversions: "NONE" + tdeImport: false totalSize: "1G" tempSize: "100M" action: "Create" + assertivePdbDeletion: true ``` + Monitor the pdb creation status until message is success @@ -529,13 +553,17 @@ kubectl logs -f $(kubectl get pods -n oracle-database-operator-system|grep oracl ``` --- + #### Other actions -Configure and use other yaml files to perform pluggable database life cycle managment action **modify_pdb_open.yaml** **modify_pdb_close.yaml** +Configure and use other yaml files to perform pluggable database life cycle managment action **pdb_open.yaml** **pdb_close.yaml** -> **Note** sql command *"alter pluggable database open instances=all;"* acts only on closed databases, so you want get any oracle error in case of execution against an pluggable database already opened +> **Note** sql command *"alter pluggable database open instances=all;"* acts only on closed databases, so you don't get any oracle error in case of execution against an pluggable database already opened +#### Imperative approach on pdb deletion +If **assertivePdbDeletion** is true then the command execution **kubectl delete pdbs crd_pdb_name** automatically deletes the pluggable database on the container database. By default this option is disabled. You can use this option during **create**,**map**,**plug** and **clone** operation. If the option is disabled then **kubectl delete** only deletes the crd but not the pluggable on the container db. Database deletion uses the option **including datafiles**. 
+If you drop the CRD without dropping the pluggable database and you need to recreate the CRD then you can use the [pdb_map.yaml](./pdb_map.yaml) [1]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-E9625FAB-9BC8-468B-9FF9-443C88D76FA1:~:text=Table%202%2D2%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation @@ -558,4 +586,8 @@ Configure and use other yaml files to perform pluggable database life cycle mana [http]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-BEECC057-A8F5-4EAB-B88E-9828C2809CD8:~:text=Example%3A%20delete%20%5B%2D%2Dglobal%5D-,user%20add,-Add%20a%20user -[dbtnsurl]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22 \ No newline at end of file +[dbtnsurl]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22 + +[imperative]:https://kubernetes.io/docs/concepts/overview/working-with-objects/object-management/ + + diff --git a/docs/multitenant/usecase01/ca.crt b/docs/multitenant/usecase01/ca.crt new file mode 100644 index 00000000..cc9aa8bb --- /dev/null +++ b/docs/multitenant/usecase01/ca.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEJTCCAw2gAwIBAgIUNXPtpnNEFBCMcnxRP5kJsBDpafcwDQYJKoZIhvcNAQEL +BQAwgaExCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQH +DAxTYW5GcmFuY2lzY28xEDAOBgNVBAoMB29yYWNsZSAxNjA0BgNVBAMMLWNkYi1k +ZXYtb3Jkcy5vcmFjbGUtZGF0YWJhc2Utb3BlcmF0b3Itc3lzdGVtIDEcMBoGA1UE +AwwTbG9jYWxob3N0ICBSb290IENBIDAeFw0yNDA4MTIxNTMyMzVaFw0yNTA4MTIx +NTMyMzVaMIGhMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMG +A1UEBwwMU2FuRnJhbmNpc2NvMRAwDgYDVQQKDAdvcmFjbGUgMTYwNAYDVQQDDC1j 
+ZGItZGV2LW9yZHMub3JhY2xlLWRhdGFiYXNlLW9wZXJhdG9yLXN5c3RlbSAxHDAa +BgNVBAMME2xvY2FsaG9zdCAgUm9vdCBDQSAwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQCmnGVApwUBF1kpqcyr2nYeED0VKvefpoHLtxHSP+vP0lWhW7NU +NJlb1YuUagjJ4/rpGRQmPxcVU51n3aAW3a5qHazIpNxNa3fvgB1rMOPFxGmdel2d +8lIt+u19q19DknX/GNgH9Mog8RcyZyPeA7d2icT8TBo74ognr+8p68O3CjBHQ8EM +SnRQR7/bh1c10Uia317ilKvs+I7oErTq5JFLeIuPDdAJ6UncaeblTf1XJ/1FrpHG +fSS7xmR8x0/MblBQlku4eImYmN35g+eRgf8bLDDwC+GPzDnAqqMLjx6h2N+btDxr +tnn05qyqmN9G08uUlP4d4BXi9ISb/toYypklAgMBAAGjUzBRMB0GA1UdDgQWBBS+ +a4X2XTmdPivdQtqDWNpfOtHypDAfBgNVHSMEGDAWgBS+a4X2XTmdPivdQtqDWNpf +OtHypDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAZIrGBNdSw +pe+1agefHfaR8hjZQiXBxdwHM1gR2LWOaFzMS8Q/eRETHTO6+VwQ0/FNaXbAqgqk +G317gZMXS5ZmXuOi28fTpAQtuzokkEKpoK0puTnbXOKGA2QSbBlpSFPqb3aJXvVt +afXFQb5P/0mhr4kuVt7Ech82WM/o5ryFgObygDayDmLatTp+VaRmBZPksnSMhslq +3zPyS7bx2YhbPTLkDxq8Mfr/Msxme8LvSXUpFf4PpQ5zwp1RE32gekct6eRQLmqU +5LXY2aPtqpMF0fBpcwPWbqA9gOYCRKcvXXIr+u1x8hf6Er6grZegHkM9TQ8s0hJd +sxi5tK0lPMHJ +-----END CERTIFICATE----- diff --git a/docs/multitenant/usecase01/ca.key b/docs/multitenant/usecase01/ca.key new file mode 100644 index 00000000..1a0ef89d --- /dev/null +++ b/docs/multitenant/usecase01/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAppxlQKcFARdZKanMq9p2HhA9FSr3n6aBy7cR0j/rz9JVoVuz +VDSZW9WLlGoIyeP66RkUJj8XFVOdZ92gFt2uah2syKTcTWt374AdazDjxcRpnXpd +nfJSLfrtfatfQ5J1/xjYB/TKIPEXMmcj3gO3donE/EwaO+KIJ6/vKevDtwowR0PB +DEp0UEe/24dXNdFImt9e4pSr7PiO6BK06uSRS3iLjw3QCelJ3Gnm5U39Vyf9Ra6R +xn0ku8ZkfMdPzG5QUJZLuHiJmJjd+YPnkYH/Gyww8Avhj8w5wKqjC48eodjfm7Q8 +a7Z59OasqpjfRtPLlJT+HeAV4vSEm/7aGMqZJQIDAQABAoIBAGXRGYdjCgnarOBr +Jeq3vIsuvUVcVqs35AYMQFXOPltoXHAZTAPfiQC4BW6TRf+q1MDyVH/y+jZMPNsm +cxjGLDopHFgZd4/QZyDzmAbTf75yA2D7UI6fcV0sBUpRGgx/SqC0HADwtT1gWB6z +LRYWC13jX4AXOcjy7OXj/DIQJDCMivedt3dv0rDWJUcBCnVot5tr6zjycefxGKa8 +mG9LZQb3x71FxwpFUau3WLDSwOjtXCeMytaGXnGmIiofJmXnFi0KA4ApzKL7QV6I +cCBS1WBLLXeVM9vOfrtzKVLWGe0qADyLm35p5Fnl3j+vimkk8h/2DEvCZ75c987m 
+O3PEgdkCgYEA0Scg+KINTA78sdZL5v2+8fT4b+EfoCgUqfr10ReUPKrz3HfrVHcj +7Vf00RT52TkfmkL3mIdLyBUzQ9vzPgweo1o4yKCKNCpR9G3ydNW+KI5jSYnq2efz +Gpe3wTt+8YoyCgm9eUxNWjfO9fipS91sSotY0PovkBohj9aezfcWp1sCgYEAy+3n +MIvW/9PoYxCvQ9fDGLvx3B4/uy0ZYPh7j5edDuaRzwFd2YXUysXhJVuqTp0KT2tv +dRPFRE9Oq5N8e5ITIUiKLQ5PIRNBZm8CiAof+XS1fIuU+MTDaTfXwyGQo0xSg8MB +ITnJulmUlkcTWEtGyBi9sIjor5ve8kqvyrdAKX8CgYA9ZUUSd0978jJPad6iEf6J +PCXpgaYs91cJhre+BzPmkzA+mZ0lEEwlkdo1vfiRwWj7eYkA50Zhl4eS9e/zWM9t +mEBu9GFdasbf/55amZvWf+W5YpjkGmiMd9jjCjn7YVvLAozyHGngf91q6vGXaYou +X7VUsvxfSqxrcs7vGwc1XQKBgB0qaD80MMqj5v+MGlTsndWCw8OEe/7sI04QG7Pc +rjS8Wyws+NwsXNOnW1z5cDEQGrJjHiyzaCot4YV+cXZG3P+MnV52RnDnjRn2VHla +YVpPC8nFOMgfdAcvWmdo/IOuXbrEf/vdhPFm8G5Ruf2NvpDNoQuHeSfsdgVXEy89 +6CpHAoGBAMZInYD0XjcnZNqiQnQdcIJN3CqDIU76Z45OOpcUrYrvTos2xhGLrRI5 +qrk5Od/sovJfse+oUIIbgsABieqtyfxM03iu8fvbahIY6Un1iw2KN9t+mcPrSZJK +jTXKf7XxZ1+yN9kvohdLc65ySyXFSm++glDq8WGrmnOtLUlr0oMm +-----END RSA PRIVATE KEY----- diff --git a/docs/multitenant/usecase01/ca.srl b/docs/multitenant/usecase01/ca.srl new file mode 100644 index 00000000..7c9868bb --- /dev/null +++ b/docs/multitenant/usecase01/ca.srl @@ -0,0 +1 @@ +77D97AB4C4B6D5A9377B84B455D3E16348C6DE04 diff --git a/docs/multitenant/usecase01/extfile.txt b/docs/multitenant/usecase01/extfile.txt new file mode 100644 index 00000000..c51d22a3 --- /dev/null +++ b/docs/multitenant/usecase01/extfile.txt @@ -0,0 +1 @@ +subjectAltName=DNS:cdb-dev-ords.oracle-database-operator-system,DNS:www.example.com diff --git a/docs/multitenant/usecase01/logfiles/BuildImage.log b/docs/multitenant/usecase01/logfiles/BuildImage.log index 4ee2fa05..f35c66d8 100644 --- a/docs/multitenant/usecase01/logfiles/BuildImage.log +++ b/docs/multitenant/usecase01/logfiles/BuildImage.log @@ -1,487 +1,896 @@ -/usr/bin/docker build -t oracle/ords-dboper:latest . -Sending build context to Docker daemon 92.38MB -Step 1/10 : FROM container-registry.oracle.com/java/jdk:latest -Trying to pull repository container-registry.oracle.com/java/jdk ... 
-latest: Pulling from container-registry.oracle.com/java/jdk -7cb069903b8a: Pull complete -a98ca67f4239: Pull complete -1b4060d1d804: Pull complete -Digest: sha256:8e7161bbd6a3a3beb77ee6f2d80c17ae4c80d88e0f5af667a19a0271c33f1b5e -Status: Downloaded newer image for container-registry.oracle.com/java/jdk:latest - ---> ad9ff1bbe92a -Step 2/10 : ENV ORDS_HOME=/opt/oracle/ords/ RUN_FILE="runOrdsSSL.sh" - ---> Running in e6f76deab66e -Removing intermediate container e6f76deab66e - ---> 0b26c489e4fd -Step 3/10 : COPY $RUN_FILE $ORDS_HOME - ---> ee472155adab -Step 4/10 : RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps && yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && yum -y install java-11-openjdk-devel && yum -y install ords && yum -y install iproute && yum clean all - ---> Running in d38a69d2cc70 -Oracle Linux 8 BaseOS Latest (x86_64) 105 MB/s | 50 MB 00:00 -Oracle Linux 8 Application Stream (x86_64) 90 MB/s | 38 MB 00:00 -Last metadata expiration check: 0:00:07 ago on Mon 10 Oct 2022 04:06:15 PM UTC. -Package yum-utils-4.0.21-11.0.1.el8.noarch is already installed. -Package tar-2:1.30-5.el8.x86_64 is already installed. 
+/usr/bin/docker build -t oracle/ords-dboper:latest ../../../ords +Sending build context to Docker daemon 13.82kB +Step 1/12 : FROM container-registry.oracle.com/java/jdk:latest + ---> b8457e2f0b73 +Step 2/12 : ENV ORDS_HOME=/opt/oracle/ords/ RUN_FILE="runOrdsSSL.sh" ORDSVERSION=23.4.0-8 + ---> Using cache + ---> 3317a16cd6f8 +Step 3/12 : COPY $RUN_FILE $ORDS_HOME + ---> 7995edec33cc +Step 4/12 : RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps curl lsof && yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && yum -y install java-11-openjdk-devel && yum -y install iproute && yum clean all + ---> Running in fe168b01f3ad +Oracle Linux 8 BaseOS Latest (x86_64) 91 MB/s | 79 MB 00:00 +Oracle Linux 8 Application Stream (x86_64) 69 MB/s | 62 MB 00:00 +Last metadata expiration check: 0:00:12 ago on Tue 20 Aug 2024 08:54:50 AM UTC. +Package yum-utils-4.0.21-23.0.1.el8.noarch is already installed. +Package tar-2:1.30-9.el8.x86_64 is already installed. Package vim-minimal-2:8.0.1763-19.0.1.el8_6.4.x86_64 is already installed. -Package procps-ng-3.3.15-6.0.1.el8.x86_64 is already installed. +Package procps-ng-3.3.15-14.0.1.el8.x86_64 is already installed. +Package curl-7.61.1-33.el8_9.5.x86_64 is already installed. Dependencies resolved. 
================================================================================ - Package Arch Version Repository Size + Package Arch Version Repository Size ================================================================================ Installing: - bind-utils x86_64 32:9.11.36-3.el8_6.1 ol8_appstream 452 k - expect x86_64 5.45.4-5.el8 ol8_baseos_latest 266 k - hostname x86_64 3.20-6.el8 ol8_baseos_latest 32 k - net-tools x86_64 2.0-0.52.20160912git.el8 ol8_baseos_latest 322 k - openssl x86_64 1:1.1.1k-7.el8_6 ol8_baseos_latest 709 k - sudo x86_64 1.8.29-8.el8 ol8_baseos_latest 925 k - tree x86_64 1.7.0-15.el8 ol8_baseos_latest 59 k - unzip x86_64 6.0-46.0.1.el8 ol8_baseos_latest 196 k - wget x86_64 1.19.5-10.0.1.el8 ol8_appstream 734 k - which x86_64 2.21-17.el8 ol8_baseos_latest 49 k - zip x86_64 3.0-23.el8 ol8_baseos_latest 270 k + bind-utils x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 453 k + expect x86_64 5.45.4-5.el8 ol8_baseos_latest 266 k + hostname x86_64 3.20-6.el8 ol8_baseos_latest 32 k + lsof x86_64 4.93.2-1.el8 ol8_baseos_latest 253 k + net-tools x86_64 2.0-0.52.20160912git.el8 ol8_baseos_latest 322 k + openssl x86_64 1:1.1.1k-12.el8_9 ol8_baseos_latest 710 k + sudo x86_64 1.9.5p2-1.el8_9 ol8_baseos_latest 1.0 M + tree x86_64 1.7.0-15.el8 ol8_baseos_latest 59 k + unzip x86_64 6.0-46.0.1.el8 ol8_baseos_latest 196 k + wget x86_64 1.19.5-12.0.1.el8_10 ol8_appstream 733 k + which x86_64 2.21-20.el8 ol8_baseos_latest 50 k + zip x86_64 3.0-23.el8 ol8_baseos_latest 270 k +Upgrading: + curl x86_64 7.61.1-34.el8 ol8_baseos_latest 352 k + dnf-plugins-core noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 76 k + libcurl x86_64 7.61.1-34.el8 ol8_baseos_latest 303 k + python3-dnf-plugins-core + noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 263 k + yum-utils noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 75 k Installing dependencies: - bind-libs x86_64 32:9.11.36-3.el8_6.1 ol8_appstream 175 k - bind-libs-lite x86_64 32:9.11.36-3.el8_6.1 ol8_appstream 1.2 M - bind-license 
noarch 32:9.11.36-3.el8_6.1 ol8_appstream 103 k - fstrm x86_64 0.6.1-2.el8 ol8_appstream 29 k - libmaxminddb x86_64 1.2.0-10.el8 ol8_appstream 33 k - libmetalink x86_64 0.1.3-7.el8 ol8_baseos_latest 32 k - protobuf-c x86_64 1.3.0-6.el8 ol8_appstream 37 k - python3-bind noarch 32:9.11.36-3.el8_6.1 ol8_appstream 150 k - python3-ply noarch 3.9-9.el8 ol8_baseos_latest 111 k - tcl x86_64 1:8.6.8-2.el8 ol8_baseos_latest 1.1 M + bind-libs x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 176 k + bind-libs-lite x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 1.2 M + bind-license noarch 32:9.11.36-16.el8_10.2 ol8_appstream 104 k + fstrm x86_64 0.6.1-3.el8 ol8_appstream 29 k + libmaxminddb x86_64 1.2.0-10.el8_9.1 ol8_appstream 32 k + libmetalink x86_64 0.1.3-7.el8 ol8_baseos_latest 32 k + protobuf-c x86_64 1.3.0-8.el8 ol8_appstream 37 k + python3-bind noarch 32:9.11.36-16.el8_10.2 ol8_appstream 151 k + python3-ply noarch 3.9-9.el8 ol8_baseos_latest 111 k + tcl x86_64 1:8.6.8-2.el8 ol8_baseos_latest 1.1 M +Installing weak dependencies: + geolite2-city noarch 20180605-1.el8 ol8_appstream 19 M + geolite2-country noarch 20180605-1.el8 ol8_appstream 1.0 M Transaction Summary ================================================================================ -Install 21 Packages +Install 24 Packages +Upgrade 5 Packages -Total download size: 6.9 M -Installed size: 20 M +Total download size: 28 M Downloading Packages: -(1/21): hostname-3.20-6.el8.x86_64.rpm 555 kB/s | 32 kB 00:00 -(2/21): libmetalink-0.1.3-7.el8.x86_64.rpm 492 kB/s | 32 kB 00:00 -(3/21): expect-5.45.4-5.el8.x86_64.rpm 3.2 MB/s | 266 kB 00:00 -(4/21): python3-ply-3.9-9.el8.noarch.rpm 5.5 MB/s | 111 kB 00:00 -(5/21): net-tools-2.0-0.52.20160912git.el8.x86_ 6.7 MB/s | 322 kB 00:00 -(6/21): openssl-1.1.1k-7.el8_6.x86_64.rpm 12 MB/s | 709 kB 00:00 -(7/21): tree-1.7.0-15.el8.x86_64.rpm 4.1 MB/s | 59 kB 00:00 -(8/21): sudo-1.8.29-8.el8.x86_64.rpm 19 MB/s | 925 kB 00:00 -(9/21): which-2.21-17.el8.x86_64.rpm 2.5 MB/s | 49 kB 00:00 
-(10/21): unzip-6.0-46.0.1.el8.x86_64.rpm 5.9 MB/s | 196 kB 00:00 -(11/21): tcl-8.6.8-2.el8.x86_64.rpm 15 MB/s | 1.1 MB 00:00 -(12/21): zip-3.0-23.el8.x86_64.rpm 15 MB/s | 270 kB 00:00 -(13/21): bind-libs-9.11.36-3.el8_6.1.x86_64.rpm 7.9 MB/s | 175 kB 00:00 -(14/21): bind-license-9.11.36-3.el8_6.1.noarch. 4.9 MB/s | 103 kB 00:00 -(15/21): bind-utils-9.11.36-3.el8_6.1.x86_64.rp 21 MB/s | 452 kB 00:00 -(16/21): bind-libs-lite-9.11.36-3.el8_6.1.x86_6 28 MB/s | 1.2 MB 00:00 -(17/21): libmaxminddb-1.2.0-10.el8.x86_64.rpm 1.8 MB/s | 33 kB 00:00 -(18/21): fstrm-0.6.1-2.el8.x86_64.rpm 1.0 MB/s | 29 kB 00:00 -(19/21): protobuf-c-1.3.0-6.el8.x86_64.rpm 1.4 MB/s | 37 kB 00:00 -(20/21): python3-bind-9.11.36-3.el8_6.1.noarch. 9.2 MB/s | 150 kB 00:00 -(21/21): wget-1.19.5-10.0.1.el8.x86_64.rpm 7.5 MB/s | 734 kB 00:00 +(1/29): hostname-3.20-6.el8.x86_64.rpm 268 kB/s | 32 kB 00:00 +(2/29): libmetalink-0.1.3-7.el8.x86_64.rpm 257 kB/s | 32 kB 00:00 +(3/29): expect-5.45.4-5.el8.x86_64.rpm 1.4 MB/s | 266 kB 00:00 +(4/29): lsof-4.93.2-1.el8.x86_64.rpm 3.2 MB/s | 253 kB 00:00 +(5/29): net-tools-2.0-0.52.20160912git.el8.x86_ 3.6 MB/s | 322 kB 00:00 +(6/29): python3-ply-3.9-9.el8.noarch.rpm 2.7 MB/s | 111 kB 00:00 +(7/29): openssl-1.1.1k-12.el8_9.x86_64.rpm 10 MB/s | 710 kB 00:00 +(8/29): tree-1.7.0-15.el8.x86_64.rpm 2.2 MB/s | 59 kB 00:00 +(9/29): sudo-1.9.5p2-1.el8_9.x86_64.rpm 14 MB/s | 1.0 MB 00:00 +(10/29): unzip-6.0-46.0.1.el8.x86_64.rpm 6.8 MB/s | 196 kB 00:00 +(11/29): which-2.21-20.el8.x86_64.rpm 2.0 MB/s | 50 kB 00:00 +(12/29): tcl-8.6.8-2.el8.x86_64.rpm 13 MB/s | 1.1 MB 00:00 +(13/29): bind-libs-9.11.36-16.el8_10.2.x86_64.r 6.7 MB/s | 176 kB 00:00 +(14/29): zip-3.0-23.el8.x86_64.rpm 8.4 MB/s | 270 kB 00:00 +(15/29): bind-libs-lite-9.11.36-16.el8_10.2.x86 29 MB/s | 1.2 MB 00:00 +(16/29): bind-license-9.11.36-16.el8_10.2.noarc 3.3 MB/s | 104 kB 00:00 +(17/29): bind-utils-9.11.36-16.el8_10.2.x86_64. 
13 MB/s | 453 kB 00:00 +(18/29): fstrm-0.6.1-3.el8.x86_64.rpm 1.2 MB/s | 29 kB 00:00 +(19/29): libmaxminddb-1.2.0-10.el8_9.1.x86_64.r 1.3 MB/s | 32 kB 00:00 +(20/29): geolite2-country-20180605-1.el8.noarch 17 MB/s | 1.0 MB 00:00 +(21/29): protobuf-c-1.3.0-8.el8.x86_64.rpm 1.5 MB/s | 37 kB 00:00 +(22/29): python3-bind-9.11.36-16.el8_10.2.noarc 5.8 MB/s | 151 kB 00:00 +(23/29): wget-1.19.5-12.0.1.el8_10.x86_64.rpm 17 MB/s | 733 kB 00:00 +(24/29): curl-7.61.1-34.el8.x86_64.rpm 12 MB/s | 352 kB 00:00 +(25/29): dnf-plugins-core-4.0.21-25.0.1.el8.noa 2.4 MB/s | 76 kB 00:00 +(26/29): libcurl-7.61.1-34.el8.x86_64.rpm 8.6 MB/s | 303 kB 00:00 +(27/29): python3-dnf-plugins-core-4.0.21-25.0.1 9.8 MB/s | 263 kB 00:00 +(28/29): yum-utils-4.0.21-25.0.1.el8.noarch.rpm 3.0 MB/s | 75 kB 00:00 +(29/29): geolite2-city-20180605-1.el8.noarch.rp 66 MB/s | 19 MB 00:00 -------------------------------------------------------------------------------- -Total 20 MB/s | 6.9 MB 00:00 +Total 43 MB/s | 28 MB 00:00 Running transaction check Transaction check succeeded. Running transaction test Transaction test succeeded. 
Running transaction Preparing : 1/1 - Installing : protobuf-c-1.3.0-6.el8.x86_64 1/21 - Installing : libmaxminddb-1.2.0-10.el8.x86_64 2/21 - Running scriptlet: libmaxminddb-1.2.0-10.el8.x86_64 2/21 - Installing : fstrm-0.6.1-2.el8.x86_64 3/21 - Installing : bind-license-32:9.11.36-3.el8_6.1.noarch 4/21 - Installing : bind-libs-lite-32:9.11.36-3.el8_6.1.x86_64 5/21 - Installing : bind-libs-32:9.11.36-3.el8_6.1.x86_64 6/21 - Installing : unzip-6.0-46.0.1.el8.x86_64 7/21 - Installing : tcl-1:8.6.8-2.el8.x86_64 8/21 - Running scriptlet: tcl-1:8.6.8-2.el8.x86_64 8/21 - Installing : python3-ply-3.9-9.el8.noarch 9/21 - Installing : python3-bind-32:9.11.36-3.el8_6.1.noarch 10/21 - Installing : libmetalink-0.1.3-7.el8.x86_64 11/21 - Installing : wget-1.19.5-10.0.1.el8.x86_64 12/21 - Running scriptlet: wget-1.19.5-10.0.1.el8.x86_64 12/21 - Installing : bind-utils-32:9.11.36-3.el8_6.1.x86_64 13/21 - Installing : expect-5.45.4-5.el8.x86_64 14/21 - Installing : zip-3.0-23.el8.x86_64 15/21 - Installing : which-2.21-17.el8.x86_64 16/21 - Installing : tree-1.7.0-15.el8.x86_64 17/21 - Installing : sudo-1.8.29-8.el8.x86_64 18/21 - Running scriptlet: sudo-1.8.29-8.el8.x86_64 18/21 - Installing : openssl-1:1.1.1k-7.el8_6.x86_64 19/21 - Installing : net-tools-2.0-0.52.20160912git.el8.x86_64 20/21 - Running scriptlet: net-tools-2.0-0.52.20160912git.el8.x86_64 20/21 - Installing : hostname-3.20-6.el8.x86_64 21/21 - Running scriptlet: hostname-3.20-6.el8.x86_64 21/21 - Verifying : expect-5.45.4-5.el8.x86_64 1/21 - Verifying : hostname-3.20-6.el8.x86_64 2/21 - Verifying : libmetalink-0.1.3-7.el8.x86_64 3/21 - Verifying : net-tools-2.0-0.52.20160912git.el8.x86_64 4/21 - Verifying : openssl-1:1.1.1k-7.el8_6.x86_64 5/21 - Verifying : python3-ply-3.9-9.el8.noarch 6/21 - Verifying : sudo-1.8.29-8.el8.x86_64 7/21 - Verifying : tcl-1:8.6.8-2.el8.x86_64 8/21 - Verifying : tree-1.7.0-15.el8.x86_64 9/21 - Verifying : unzip-6.0-46.0.1.el8.x86_64 10/21 - Verifying : which-2.21-17.el8.x86_64 11/21 - 
Verifying : zip-3.0-23.el8.x86_64 12/21 - Verifying : bind-libs-32:9.11.36-3.el8_6.1.x86_64 13/21 - Verifying : bind-libs-lite-32:9.11.36-3.el8_6.1.x86_64 14/21 - Verifying : bind-license-32:9.11.36-3.el8_6.1.noarch 15/21 - Verifying : bind-utils-32:9.11.36-3.el8_6.1.x86_64 16/21 - Verifying : fstrm-0.6.1-2.el8.x86_64 17/21 - Verifying : libmaxminddb-1.2.0-10.el8.x86_64 18/21 - Verifying : protobuf-c-1.3.0-6.el8.x86_64 19/21 - Verifying : python3-bind-32:9.11.36-3.el8_6.1.noarch 20/21 - Verifying : wget-1.19.5-10.0.1.el8.x86_64 21/21 + Running scriptlet: protobuf-c-1.3.0-8.el8.x86_64 1/1 + Installing : protobuf-c-1.3.0-8.el8.x86_64 1/34 + Installing : fstrm-0.6.1-3.el8.x86_64 2/34 + Installing : bind-license-32:9.11.36-16.el8_10.2.noarch 3/34 + Upgrading : python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch 4/34 + Upgrading : dnf-plugins-core-4.0.21-25.0.1.el8.noarch 5/34 + Upgrading : libcurl-7.61.1-34.el8.x86_64 6/34 + Installing : geolite2-country-20180605-1.el8.noarch 7/34 + Installing : geolite2-city-20180605-1.el8.noarch 8/34 + Installing : libmaxminddb-1.2.0-10.el8_9.1.x86_64 9/34 + Running scriptlet: libmaxminddb-1.2.0-10.el8_9.1.x86_64 9/34 + Installing : bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 10/34 + Installing : bind-libs-32:9.11.36-16.el8_10.2.x86_64 11/34 + Installing : unzip-6.0-46.0.1.el8.x86_64 12/34 + Installing : tcl-1:8.6.8-2.el8.x86_64 13/34 + Running scriptlet: tcl-1:8.6.8-2.el8.x86_64 13/34 + Installing : python3-ply-3.9-9.el8.noarch 14/34 + Installing : python3-bind-32:9.11.36-16.el8_10.2.noarch 15/34 + Installing : libmetalink-0.1.3-7.el8.x86_64 16/34 + Installing : wget-1.19.5-12.0.1.el8_10.x86_64 17/34 + Running scriptlet: wget-1.19.5-12.0.1.el8_10.x86_64 17/34 + Installing : bind-utils-32:9.11.36-16.el8_10.2.x86_64 18/34 + Installing : expect-5.45.4-5.el8.x86_64 19/34 + Installing : zip-3.0-23.el8.x86_64 20/34 + Upgrading : curl-7.61.1-34.el8.x86_64 21/34 + Upgrading : yum-utils-4.0.21-25.0.1.el8.noarch 22/34 + Installing : 
which-2.21-20.el8.x86_64 23/34 + Installing : tree-1.7.0-15.el8.x86_64 24/34 + Installing : sudo-1.9.5p2-1.el8_9.x86_64 25/34 + Running scriptlet: sudo-1.9.5p2-1.el8_9.x86_64 25/34 + Installing : openssl-1:1.1.1k-12.el8_9.x86_64 26/34 + Installing : net-tools-2.0-0.52.20160912git.el8.x86_64 27/34 + Running scriptlet: net-tools-2.0-0.52.20160912git.el8.x86_64 27/34 + Installing : lsof-4.93.2-1.el8.x86_64 28/34 + Installing : hostname-3.20-6.el8.x86_64 29/34 + Running scriptlet: hostname-3.20-6.el8.x86_64 29/34 + Cleanup : curl-7.61.1-33.el8_9.5.x86_64 30/34 + Cleanup : yum-utils-4.0.21-23.0.1.el8.noarch 31/34 + Cleanup : dnf-plugins-core-4.0.21-23.0.1.el8.noarch 32/34 + Cleanup : python3-dnf-plugins-core-4.0.21-23.0.1.el8.noarch 33/34 + Cleanup : libcurl-7.61.1-33.el8_9.5.x86_64 34/34 + Running scriptlet: libcurl-7.61.1-33.el8_9.5.x86_64 34/34 + Verifying : expect-5.45.4-5.el8.x86_64 1/34 + Verifying : hostname-3.20-6.el8.x86_64 2/34 + Verifying : libmetalink-0.1.3-7.el8.x86_64 3/34 + Verifying : lsof-4.93.2-1.el8.x86_64 4/34 + Verifying : net-tools-2.0-0.52.20160912git.el8.x86_64 5/34 + Verifying : openssl-1:1.1.1k-12.el8_9.x86_64 6/34 + Verifying : python3-ply-3.9-9.el8.noarch 7/34 + Verifying : sudo-1.9.5p2-1.el8_9.x86_64 8/34 + Verifying : tcl-1:8.6.8-2.el8.x86_64 9/34 + Verifying : tree-1.7.0-15.el8.x86_64 10/34 + Verifying : unzip-6.0-46.0.1.el8.x86_64 11/34 + Verifying : which-2.21-20.el8.x86_64 12/34 + Verifying : zip-3.0-23.el8.x86_64 13/34 + Verifying : bind-libs-32:9.11.36-16.el8_10.2.x86_64 14/34 + Verifying : bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 15/34 + Verifying : bind-license-32:9.11.36-16.el8_10.2.noarch 16/34 + Verifying : bind-utils-32:9.11.36-16.el8_10.2.x86_64 17/34 + Verifying : fstrm-0.6.1-3.el8.x86_64 18/34 + Verifying : geolite2-city-20180605-1.el8.noarch 19/34 + Verifying : geolite2-country-20180605-1.el8.noarch 20/34 + Verifying : libmaxminddb-1.2.0-10.el8_9.1.x86_64 21/34 + Verifying : protobuf-c-1.3.0-8.el8.x86_64 22/34 + 
Verifying : python3-bind-32:9.11.36-16.el8_10.2.noarch 23/34 + Verifying : wget-1.19.5-12.0.1.el8_10.x86_64 24/34 + Verifying : curl-7.61.1-34.el8.x86_64 25/34 + Verifying : curl-7.61.1-33.el8_9.5.x86_64 26/34 + Verifying : dnf-plugins-core-4.0.21-25.0.1.el8.noarch 27/34 + Verifying : dnf-plugins-core-4.0.21-23.0.1.el8.noarch 28/34 + Verifying : libcurl-7.61.1-34.el8.x86_64 29/34 + Verifying : libcurl-7.61.1-33.el8_9.5.x86_64 30/34 + Verifying : python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch 31/34 + Verifying : python3-dnf-plugins-core-4.0.21-23.0.1.el8.noarch 32/34 + Verifying : yum-utils-4.0.21-25.0.1.el8.noarch 33/34 + Verifying : yum-utils-4.0.21-23.0.1.el8.noarch 34/34 +Upgraded: + curl-7.61.1-34.el8.x86_64 + dnf-plugins-core-4.0.21-25.0.1.el8.noarch + libcurl-7.61.1-34.el8.x86_64 + python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch + yum-utils-4.0.21-25.0.1.el8.noarch Installed: - bind-libs-32:9.11.36-3.el8_6.1.x86_64 - bind-libs-lite-32:9.11.36-3.el8_6.1.x86_64 - bind-license-32:9.11.36-3.el8_6.1.noarch - bind-utils-32:9.11.36-3.el8_6.1.x86_64 + bind-libs-32:9.11.36-16.el8_10.2.x86_64 + bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 + bind-license-32:9.11.36-16.el8_10.2.noarch + bind-utils-32:9.11.36-16.el8_10.2.x86_64 expect-5.45.4-5.el8.x86_64 - fstrm-0.6.1-2.el8.x86_64 + fstrm-0.6.1-3.el8.x86_64 + geolite2-city-20180605-1.el8.noarch + geolite2-country-20180605-1.el8.noarch hostname-3.20-6.el8.x86_64 - libmaxminddb-1.2.0-10.el8.x86_64 + libmaxminddb-1.2.0-10.el8_9.1.x86_64 libmetalink-0.1.3-7.el8.x86_64 + lsof-4.93.2-1.el8.x86_64 net-tools-2.0-0.52.20160912git.el8.x86_64 - openssl-1:1.1.1k-7.el8_6.x86_64 - protobuf-c-1.3.0-6.el8.x86_64 - python3-bind-32:9.11.36-3.el8_6.1.noarch + openssl-1:1.1.1k-12.el8_9.x86_64 + protobuf-c-1.3.0-8.el8.x86_64 + python3-bind-32:9.11.36-16.el8_10.2.noarch python3-ply-3.9-9.el8.noarch - sudo-1.8.29-8.el8.x86_64 + sudo-1.9.5p2-1.el8_9.x86_64 tcl-1:8.6.8-2.el8.x86_64 tree-1.7.0-15.el8.x86_64 unzip-6.0-46.0.1.el8.x86_64 - 
wget-1.19.5-10.0.1.el8.x86_64 - which-2.21-17.el8.x86_64 + wget-1.19.5-12.0.1.el8_10.x86_64 + which-2.21-20.el8.x86_64 zip-3.0-23.el8.x86_64 Complete! Adding repo from: http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 -created by dnf config-manager from http://yum.o 194 kB/s | 49 kB 00:00 +created by dnf config-manager from http://yum.o 496 kB/s | 139 kB 00:00 +Last metadata expiration check: 0:00:01 ago on Tue 20 Aug 2024 08:55:14 AM UTC. Dependencies resolved. -============================================================================================= - Package Arch Version Repository Size -============================================================================================= +============================================================================================== + Package Arch Version Repository Size +============================================================================================== Installing: - java-11-openjdk-devel x86_64 1:11.0.16.1.1-1.el8_6 ol8_appstream 3.4 M + java-11-openjdk-devel x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 3.4 M Installing dependencies: - alsa-lib x86_64 1.2.6.1-3.el8 ol8_appstream 491 k - avahi-libs x86_64 0.7-20.el8 ol8_baseos_latest 62 k - copy-jdk-configs noarch 4.0-2.el8 ol8_appstream 30 k - crypto-policies-scripts noarch 20211116-1.gitae470d6.el8 ol8_baseos_latest 83 k - cups-libs x86_64 1:2.2.6-45.el8_6.2 ol8_baseos_latest 434 k - giflib x86_64 5.1.4-3.el8 ol8_appstream 51 k - graphite2 x86_64 1.3.10-10.el8 ol8_appstream 122 k - harfbuzz x86_64 1.7.5-3.el8 ol8_appstream 295 k - java-11-openjdk x86_64 1:11.0.16.1.1-1.el8_6 ol8_appstream 272 k - java-11-openjdk-headless x86_64 1:11.0.16.1.1-1.el8_6 ol8_appstream 40 M - javapackages-filesystem noarch 5.3.0-1.module+el8+5136+7ff78f74 ol8_appstream 30 k - lcms2 x86_64 2.9-2.el8 ol8_appstream 164 k - libX11 x86_64 1.6.8-5.el8 ol8_appstream 611 k - libX11-common noarch 1.6.8-5.el8 ol8_appstream 158 k - libXau x86_64 1.0.9-3.el8 
ol8_appstream 37 k - libXcomposite x86_64 0.4.4-14.el8 ol8_appstream 28 k - libXext x86_64 1.3.4-1.el8 ol8_appstream 45 k - libXi x86_64 1.7.10-1.el8 ol8_appstream 49 k - libXrender x86_64 0.9.10-7.el8 ol8_appstream 33 k - libXtst x86_64 1.2.3-7.el8 ol8_appstream 22 k - libfontenc x86_64 1.1.3-8.el8 ol8_appstream 37 k - libjpeg-turbo x86_64 1.5.3-12.el8 ol8_appstream 157 k - libpkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 35 k - libxcb x86_64 1.13.1-1.el8 ol8_appstream 231 k - lksctp-tools x86_64 1.0.18-3.el8 ol8_baseos_latest 100 k - lua x86_64 5.3.4-12.el8 ol8_appstream 192 k - nspr x86_64 4.34.0-3.el8_6 ol8_appstream 143 k - nss x86_64 3.79.0-10.el8_6 ol8_appstream 747 k - nss-softokn x86_64 3.79.0-10.el8_6 ol8_appstream 1.2 M - nss-softokn-freebl x86_64 3.79.0-10.el8_6 ol8_appstream 398 k - nss-sysinit x86_64 3.79.0-10.el8_6 ol8_appstream 74 k - nss-util x86_64 3.79.0-10.el8_6 ol8_appstream 138 k - pkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 38 k - pkgconf-m4 noarch 1.4.2-1.el8 ol8_baseos_latest 17 k - pkgconf-pkg-config x86_64 1.4.2-1.el8 ol8_baseos_latest 15 k - ttmkfdir x86_64 3.0.9-54.el8 ol8_appstream 62 k - tzdata-java noarch 2022d-1.el8 ol8_appstream 186 k - xorg-x11-font-utils x86_64 1:7.5-41.el8 ol8_appstream 104 k - xorg-x11-fonts-Type1 noarch 7.5-19.el8 ol8_appstream 522 k + adwaita-cursor-theme noarch 3.28.0-3.el8 ol8_appstream 647 k + adwaita-icon-theme noarch 3.28.0-3.el8 ol8_appstream 11 M + alsa-lib x86_64 1.2.10-2.el8 ol8_appstream 500 k + at-spi2-atk x86_64 2.26.2-1.el8 ol8_appstream 89 k + at-spi2-core x86_64 2.28.0-1.el8 ol8_appstream 169 k + atk x86_64 2.28.1-1.el8 ol8_appstream 272 k + avahi-libs x86_64 0.7-27.el8 ol8_baseos_latest 61 k + cairo x86_64 1.15.12-6.el8 ol8_appstream 719 k + cairo-gobject x86_64 1.15.12-6.el8 ol8_appstream 33 k + colord-libs x86_64 1.4.2-1.el8 ol8_appstream 236 k + copy-jdk-configs noarch 4.0-2.el8 ol8_appstream 30 k + cpio x86_64 2.12-11.el8 ol8_baseos_latest 266 k + crypto-policies-scripts noarch 
20230731-1.git3177e06.el8 ol8_baseos_latest 84 k + cups-libs x86_64 1:2.2.6-60.el8_10 ol8_baseos_latest 435 k + dracut x86_64 049-233.git20240115.0.1.el8 ol8_baseos_latest 382 k + file x86_64 5.33-25.el8 ol8_baseos_latest 77 k + fribidi x86_64 1.0.4-9.el8 ol8_appstream 89 k + gdk-pixbuf2 x86_64 2.36.12-6.el8_10 ol8_baseos_latest 465 k + gdk-pixbuf2-modules x86_64 2.36.12-6.el8_10 ol8_appstream 108 k + gettext x86_64 0.19.8.1-17.el8 ol8_baseos_latest 1.1 M + gettext-libs x86_64 0.19.8.1-17.el8 ol8_baseos_latest 312 k + glib-networking x86_64 2.56.1-1.1.el8 ol8_baseos_latest 155 k + graphite2 x86_64 1.3.10-10.el8 ol8_appstream 122 k + grub2-common noarch 1:2.02-156.0.2.el8 ol8_baseos_latest 897 k + grub2-tools x86_64 1:2.02-156.0.2.el8 ol8_baseos_latest 2.0 M + grub2-tools-minimal x86_64 1:2.02-156.0.2.el8 ol8_baseos_latest 215 k + gsettings-desktop-schemas x86_64 3.32.0-6.el8 ol8_baseos_latest 633 k + gtk-update-icon-cache x86_64 3.22.30-11.el8 ol8_appstream 32 k + harfbuzz x86_64 1.7.5-4.el8 ol8_appstream 295 k + hicolor-icon-theme noarch 0.17-2.el8 ol8_appstream 48 k + jasper-libs x86_64 2.0.14-5.el8 ol8_appstream 167 k + java-11-openjdk x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 475 k + java-11-openjdk-headless x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 42 M + javapackages-filesystem noarch 5.3.0-1.module+el8+5136+7ff78f74 ol8_appstream 30 k + jbigkit-libs x86_64 2.1-14.el8 ol8_appstream 55 k + json-glib x86_64 1.4.4-1.el8 ol8_baseos_latest 144 k + kbd-legacy noarch 2.0.4-11.el8 ol8_baseos_latest 481 k + kbd-misc noarch 2.0.4-11.el8 ol8_baseos_latest 1.5 M + lcms2 x86_64 2.9-2.el8 ol8_appstream 164 k + libX11 x86_64 1.6.8-8.el8 ol8_appstream 611 k + libX11-common noarch 1.6.8-8.el8 ol8_appstream 157 k + libXau x86_64 1.0.9-3.el8 ol8_appstream 37 k + libXcomposite x86_64 0.4.4-14.el8 ol8_appstream 28 k + libXcursor x86_64 1.1.15-3.el8 ol8_appstream 36 k + libXdamage x86_64 1.1.4-14.el8 ol8_appstream 27 k + libXext x86_64 1.3.4-1.el8 ol8_appstream 45 k + 
libXfixes x86_64 5.0.3-7.el8 ol8_appstream 25 k + libXft x86_64 2.3.3-1.el8 ol8_appstream 67 k + libXi x86_64 1.7.10-1.el8 ol8_appstream 49 k + libXinerama x86_64 1.1.4-1.el8 ol8_appstream 15 k + libXrandr x86_64 1.5.2-1.el8 ol8_appstream 34 k + libXrender x86_64 0.9.10-7.el8 ol8_appstream 33 k + libXtst x86_64 1.2.3-7.el8 ol8_appstream 22 k + libcroco x86_64 0.6.12-4.el8_2.1 ol8_baseos_latest 113 k + libdatrie x86_64 0.2.9-7.el8 ol8_appstream 33 k + libepoxy x86_64 1.5.8-1.el8 ol8_appstream 225 k + libfontenc x86_64 1.1.3-8.el8 ol8_appstream 37 k + libgomp x86_64 8.5.0-22.0.1.el8_10 ol8_baseos_latest 218 k + libgusb x86_64 0.3.0-1.el8 ol8_baseos_latest 49 k + libjpeg-turbo x86_64 1.5.3-12.el8 ol8_appstream 157 k + libkcapi x86_64 1.4.0-2.0.1.el8 ol8_baseos_latest 52 k + libkcapi-hmaccalc x86_64 1.4.0-2.0.1.el8 ol8_baseos_latest 31 k + libmodman x86_64 2.0.1-17.el8 ol8_baseos_latest 36 k + libpkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 35 k + libproxy x86_64 0.4.15-5.2.el8 ol8_baseos_latest 75 k + libsoup x86_64 2.62.3-5.el8 ol8_baseos_latest 424 k + libthai x86_64 0.1.27-2.el8 ol8_appstream 203 k + libtiff x86_64 4.0.9-32.el8_10 ol8_appstream 189 k + libwayland-client x86_64 1.21.0-1.el8 ol8_appstream 41 k + libwayland-cursor x86_64 1.21.0-1.el8 ol8_appstream 26 k + libwayland-egl x86_64 1.21.0-1.el8 ol8_appstream 19 k + libxcb x86_64 1.13.1-1.el8 ol8_appstream 231 k + libxkbcommon x86_64 0.9.1-1.el8 ol8_appstream 116 k + lksctp-tools x86_64 1.0.18-3.el8 ol8_baseos_latest 100 k + lua x86_64 5.3.4-12.el8 ol8_appstream 192 k + nspr x86_64 4.35.0-1.el8_8 ol8_appstream 143 k + nss x86_64 3.90.0-7.el8_10 ol8_appstream 750 k + nss-softokn x86_64 3.90.0-7.el8_10 ol8_appstream 1.2 M + nss-softokn-freebl x86_64 3.90.0-7.el8_10 ol8_appstream 375 k + nss-sysinit x86_64 3.90.0-7.el8_10 ol8_appstream 74 k + nss-util x86_64 3.90.0-7.el8_10 ol8_appstream 139 k + os-prober x86_64 1.74-9.0.1.el8 ol8_baseos_latest 51 k + pango x86_64 1.42.4-8.el8 ol8_appstream 297 k + pixman 
x86_64 0.38.4-4.el8 ol8_appstream 256 k + pkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 38 k + pkgconf-m4 noarch 1.4.2-1.el8 ol8_baseos_latest 17 k + pkgconf-pkg-config x86_64 1.4.2-1.el8 ol8_baseos_latest 15 k + rest x86_64 0.8.1-2.el8 ol8_appstream 70 k + shared-mime-info x86_64 1.9-4.el8 ol8_baseos_latest 328 k + systemd-udev x86_64 239-78.0.4.el8 ol8_baseos_latest 1.6 M + ttmkfdir x86_64 3.0.9-54.el8 ol8_appstream 62 k + tzdata-java noarch 2024a-1.0.1.el8 ol8_appstream 186 k + xkeyboard-config noarch 2.28-1.el8 ol8_appstream 782 k + xorg-x11-font-utils x86_64 1:7.5-41.el8 ol8_appstream 104 k + xorg-x11-fonts-Type1 noarch 7.5-19.el8 ol8_appstream 522 k + xz x86_64 5.2.4-4.el8_6 ol8_baseos_latest 153 k +Installing weak dependencies: + abattis-cantarell-fonts noarch 0.0.25-6.el8 ol8_appstream 155 k + dconf x86_64 0.28.0-4.0.1.el8 ol8_appstream 108 k + dejavu-sans-mono-fonts noarch 2.35-7.el8 ol8_baseos_latest 447 k + grubby x86_64 8.40-49.0.2.el8 ol8_baseos_latest 50 k + gtk3 x86_64 3.22.30-11.el8 ol8_appstream 4.5 M + hardlink x86_64 1:1.3-6.el8 ol8_baseos_latest 29 k + kbd x86_64 2.0.4-11.el8 ol8_baseos_latest 390 k + memstrack x86_64 0.2.5-2.el8 ol8_baseos_latest 51 k + pigz x86_64 2.4-4.el8 ol8_baseos_latest 80 k Enabling module streams: - javapackages-runtime 201801 + javapackages-runtime 201801 Transaction Summary -============================================================================================= -Install 40 Packages +============================================================================================== +Install 106 Packages -Total download size: 50 M -Installed size: 196 M +Total download size: 86 M +Installed size: 312 M Downloading Packages: -(1/40): crypto-policies-scripts-20211116-1.gita 1.3 MB/s | 83 kB 00:00 -(2/40): avahi-libs-0.7-20.el8.x86_64.rpm 879 kB/s | 62 kB 00:00 -(3/40): libpkgconf-1.4.2-1.el8.x86_64.rpm 2.0 MB/s | 35 kB 00:00 -(4/40): cups-libs-2.2.6-45.el8_6.2.x86_64.rpm 4.5 MB/s | 434 kB 00:00 -(5/40): 
lksctp-tools-1.0.18-3.el8.x86_64.rpm 3.7 MB/s | 100 kB 00:00 -(6/40): pkgconf-1.4.2-1.el8.x86_64.rpm 2.2 MB/s | 38 kB 00:00 -(7/40): pkgconf-m4-1.4.2-1.el8.noarch.rpm 1.2 MB/s | 17 kB 00:00 -(8/40): pkgconf-pkg-config-1.4.2-1.el8.x86_64.r 929 kB/s | 15 kB 00:00 -(9/40): copy-jdk-configs-4.0-2.el8.noarch.rpm 2.2 MB/s | 30 kB 00:00 -(10/40): giflib-5.1.4-3.el8.x86_64.rpm 3.3 MB/s | 51 kB 00:00 -(11/40): graphite2-1.3.10-10.el8.x86_64.rpm 7.7 MB/s | 122 kB 00:00 -(12/40): alsa-lib-1.2.6.1-3.el8.x86_64.rpm 12 MB/s | 491 kB 00:00 -(13/40): java-11-openjdk-11.0.16.1.1-1.el8_6.x8 14 MB/s | 272 kB 00:00 -(14/40): harfbuzz-1.7.5-3.el8.x86_64.rpm 8.7 MB/s | 295 kB 00:00 -(15/40): javapackages-filesystem-5.3.0-1.module 2.0 MB/s | 30 kB 00:00 -(16/40): lcms2-2.9-2.el8.x86_64.rpm 6.7 MB/s | 164 kB 00:00 -(17/40): java-11-openjdk-devel-11.0.16.1.1-1.el 46 MB/s | 3.4 MB 00:00 -(18/40): libX11-common-1.6.8-5.el8.noarch.rpm 8.4 MB/s | 158 kB 00:00 -(19/40): libX11-1.6.8-5.el8.x86_64.rpm 17 MB/s | 611 kB 00:00 -(20/40): libXau-1.0.9-3.el8.x86_64.rpm 2.6 MB/s | 37 kB 00:00 -(21/40): libXcomposite-0.4.4-14.el8.x86_64.rpm 2.0 MB/s | 28 kB 00:00 -(22/40): libXi-1.7.10-1.el8.x86_64.rpm 2.2 MB/s | 49 kB 00:00 -(23/40): libXext-1.3.4-1.el8.x86_64.rpm 1.6 MB/s | 45 kB 00:00 -(24/40): libXtst-1.2.3-7.el8.x86_64.rpm 1.1 MB/s | 22 kB 00:00 -(25/40): libXrender-0.9.10-7.el8.x86_64.rpm 1.3 MB/s | 33 kB 00:00 -(26/40): libfontenc-1.1.3-8.el8.x86_64.rpm 2.2 MB/s | 37 kB 00:00 -(27/40): libjpeg-turbo-1.5.3-12.el8.x86_64.rpm 8.6 MB/s | 157 kB 00:00 -(28/40): libxcb-1.13.1-1.el8.x86_64.rpm 13 MB/s | 231 kB 00:00 -(29/40): lua-5.3.4-12.el8.x86_64.rpm 11 MB/s | 192 kB 00:00 -(30/40): nspr-4.34.0-3.el8_6.x86_64.rpm 7.8 MB/s | 143 kB 00:00 -(31/40): nss-3.79.0-10.el8_6.x86_64.rpm 23 MB/s | 747 kB 00:00 -(32/40): nss-softokn-3.79.0-10.el8_6.x86_64.rpm 42 MB/s | 1.2 MB 00:00 -(33/40): nss-softokn-freebl-3.79.0-10.el8_6.x86 19 MB/s | 398 kB 00:00 -(34/40): nss-sysinit-3.79.0-10.el8_6.x86_64.rpm 5.3 MB/s | 
74 kB 00:00 -(35/40): nss-util-3.79.0-10.el8_6.x86_64.rpm 8.7 MB/s | 138 kB 00:00 -(36/40): ttmkfdir-3.0.9-54.el8.x86_64.rpm 4.2 MB/s | 62 kB 00:00 -(37/40): tzdata-java-2022d-1.el8.noarch.rpm 11 MB/s | 186 kB 00:00 -(38/40): xorg-x11-font-utils-7.5-41.el8.x86_64. 6.7 MB/s | 104 kB 00:00 -(39/40): xorg-x11-fonts-Type1-7.5-19.el8.noarch 24 MB/s | 522 kB 00:00 -(40/40): java-11-openjdk-headless-11.0.16.1.1-1 77 MB/s | 40 MB 00:00 +(1/106): crypto-policies-scripts-20230731-1.git 862 kB/s | 84 kB 00:00 +(2/106): avahi-libs-0.7-27.el8.x86_64.rpm 602 kB/s | 61 kB 00:00 +(3/106): cpio-2.12-11.el8.x86_64.rpm 1.8 MB/s | 266 kB 00:00 +(4/106): cups-libs-2.2.6-60.el8_10.x86_64.rpm 5.7 MB/s | 435 kB 00:00 +(5/106): dejavu-sans-mono-fonts-2.35-7.el8.noar 5.1 MB/s | 447 kB 00:00 +(6/106): dracut-049-233.git20240115.0.1.el8.x86 7.0 MB/s | 382 kB 00:00 +(7/106): gdk-pixbuf2-2.36.12-6.el8_10.x86_64.rp 12 MB/s | 465 kB 00:00 +(8/106): gettext-libs-0.19.8.1-17.el8.x86_64.rp 9.3 MB/s | 312 kB 00:00 +(9/106): gettext-0.19.8.1-17.el8.x86_64.rpm 16 MB/s | 1.1 MB 00:00 +(10/106): glib-networking-2.56.1-1.1.el8.x86_64 6.0 MB/s | 155 kB 00:00 +(11/106): grub2-common-2.02-156.0.2.el8.noarch. 26 MB/s | 897 kB 00:00 +(12/106): grub2-tools-minimal-2.02-156.0.2.el8. 
8.2 MB/s | 215 kB 00:00 +(13/106): grubby-8.40-49.0.2.el8.x86_64.rpm 2.1 MB/s | 50 kB 00:00 +(14/106): grub2-tools-2.02-156.0.2.el8.x86_64.r 26 MB/s | 2.0 MB 00:00 +(15/106): gsettings-desktop-schemas-3.32.0-6.el 19 MB/s | 633 kB 00:00 +(16/106): hardlink-1.3-6.el8.x86_64.rpm 1.1 MB/s | 29 kB 00:00 +(17/106): json-glib-1.4.4-1.el8.x86_64.rpm 5.9 MB/s | 144 kB 00:00 +(18/106): kbd-2.0.4-11.el8.x86_64.rpm 14 MB/s | 390 kB 00:00 +(19/106): kbd-legacy-2.0.4-11.el8.noarch.rpm 17 MB/s | 481 kB 00:00 +(20/106): kbd-misc-2.0.4-11.el8.noarch.rpm 41 MB/s | 1.5 MB 00:00 +(21/106): libcroco-0.6.12-4.el8_2.1.x86_64.rpm 4.7 MB/s | 113 kB 00:00 +(22/106): libgomp-8.5.0-22.0.1.el8_10.x86_64.rp 9.1 MB/s | 218 kB 00:00 +(23/106): libgusb-0.3.0-1.el8.x86_64.rpm 2.1 MB/s | 49 kB 00:00 +(24/106): libkcapi-1.4.0-2.0.1.el8.x86_64.rpm 1.6 MB/s | 52 kB 00:00 +(25/106): libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86 822 kB/s | 31 kB 00:00 +(26/106): libmodman-2.0.1-17.el8.x86_64.rpm 1.6 MB/s | 36 kB 00:00 +(27/106): libpkgconf-1.4.2-1.el8.x86_64.rpm 1.2 MB/s | 35 kB 00:00 +(28/106): libproxy-0.4.15-5.2.el8.x86_64.rpm 3.0 MB/s | 75 kB 00:00 +(29/106): libsoup-2.62.3-5.el8.x86_64.rpm 15 MB/s | 424 kB 00:00 +(30/106): lksctp-tools-1.0.18-3.el8.x86_64.rpm 3.5 MB/s | 100 kB 00:00 +(31/106): memstrack-0.2.5-2.el8.x86_64.rpm 2.2 MB/s | 51 kB 00:00 +(32/106): os-prober-1.74-9.0.1.el8.x86_64.rpm 2.2 MB/s | 51 kB 00:00 +(33/106): pigz-2.4-4.el8.x86_64.rpm 3.5 MB/s | 80 kB 00:00 +(34/106): pkgconf-1.4.2-1.el8.x86_64.rpm 1.7 MB/s | 38 kB 00:00 +(35/106): pkgconf-m4-1.4.2-1.el8.noarch.rpm 761 kB/s | 17 kB 00:00 +(36/106): pkgconf-pkg-config-1.4.2-1.el8.x86_64 691 kB/s | 15 kB 00:00 +(37/106): shared-mime-info-1.9-4.el8.x86_64.rpm 13 MB/s | 328 kB 00:00 +(38/106): systemd-udev-239-78.0.4.el8.x86_64.rp 32 MB/s | 1.6 MB 00:00 +(39/106): xz-5.2.4-4.el8_6.x86_64.rpm 5.2 MB/s | 153 kB 00:00 +(40/106): abattis-cantarell-fonts-0.0.25-6.el8. 
6.4 MB/s | 155 kB 00:00 +(41/106): adwaita-cursor-theme-3.28.0-3.el8.noa 22 MB/s | 647 kB 00:00 +(42/106): alsa-lib-1.2.10-2.el8.x86_64.rpm 18 MB/s | 500 kB 00:00 +(43/106): at-spi2-atk-2.26.2-1.el8.x86_64.rpm 3.8 MB/s | 89 kB 00:00 +(44/106): at-spi2-core-2.28.0-1.el8.x86_64.rpm 6.9 MB/s | 169 kB 00:00 +(45/106): atk-2.28.1-1.el8.x86_64.rpm 9.2 MB/s | 272 kB 00:00 +(46/106): cairo-1.15.12-6.el8.x86_64.rpm 24 MB/s | 719 kB 00:00 +(47/106): adwaita-icon-theme-3.28.0-3.el8.noarc 65 MB/s | 11 MB 00:00 +(48/106): cairo-gobject-1.15.12-6.el8.x86_64.rp 914 kB/s | 33 kB 00:00 +(49/106): colord-libs-1.4.2-1.el8.x86_64.rpm 9.5 MB/s | 236 kB 00:00 +(50/106): copy-jdk-configs-4.0-2.el8.noarch.rpm 1.1 MB/s | 30 kB 00:00 +(51/106): dconf-0.28.0-4.0.1.el8.x86_64.rpm 4.4 MB/s | 108 kB 00:00 +(52/106): fribidi-1.0.4-9.el8.x86_64.rpm 3.9 MB/s | 89 kB 00:00 +(53/106): graphite2-1.3.10-10.el8.x86_64.rpm 5.1 MB/s | 122 kB 00:00 +(54/106): gdk-pixbuf2-modules-2.36.12-6.el8_10. 3.6 MB/s | 108 kB 00:00 +(55/106): gtk-update-icon-cache-3.22.30-11.el8. 1.4 MB/s | 32 kB 00:00 +(56/106): harfbuzz-1.7.5-4.el8.x86_64.rpm 11 MB/s | 295 kB 00:00 +(57/106): gtk3-3.22.30-11.el8.x86_64.rpm 68 MB/s | 4.5 MB 00:00 +(58/106): hicolor-icon-theme-0.17-2.el8.noarch. 
2.1 MB/s | 48 kB 00:00 +(59/106): java-11-openjdk-11.0.24.0.8-3.0.1.el8 17 MB/s | 475 kB 00:00 +(60/106): jasper-libs-2.0.14-5.el8.x86_64.rpm 5.0 MB/s | 167 kB 00:00 +(61/106): java-11-openjdk-devel-11.0.24.0.8-3.0 61 MB/s | 3.4 MB 00:00 +(62/106): javapackages-filesystem-5.3.0-1.modul 1.2 MB/s | 30 kB 00:00 +(63/106): jbigkit-libs-2.1-14.el8.x86_64.rpm 2.1 MB/s | 55 kB 00:00 +(64/106): lcms2-2.9-2.el8.x86_64.rpm 3.8 MB/s | 164 kB 00:00 +(65/106): libX11-1.6.8-8.el8.x86_64.rpm 20 MB/s | 611 kB 00:00 +(66/106): libX11-common-1.6.8-8.el8.noarch.rpm 6.8 MB/s | 157 kB 00:00 +(67/106): libXau-1.0.9-3.el8.x86_64.rpm 1.6 MB/s | 37 kB 00:00 +(68/106): libXcomposite-0.4.4-14.el8.x86_64.rpm 1.3 MB/s | 28 kB 00:00 +(69/106): libXcursor-1.1.15-3.el8.x86_64.rpm 1.6 MB/s | 36 kB 00:00 +(70/106): libXdamage-1.1.4-14.el8.x86_64.rpm 1.2 MB/s | 27 kB 00:00 +(71/106): libXext-1.3.4-1.el8.x86_64.rpm 2.0 MB/s | 45 kB 00:00 +(72/106): libXfixes-5.0.3-7.el8.x86_64.rpm 1.1 MB/s | 25 kB 00:00 +(73/106): libXft-2.3.3-1.el8.x86_64.rpm 2.9 MB/s | 67 kB 00:00 +(74/106): libXi-1.7.10-1.el8.x86_64.rpm 2.2 MB/s | 49 kB 00:00 +(75/106): libXinerama-1.1.4-1.el8.x86_64.rpm 717 kB/s | 15 kB 00:00 +(76/106): libXrandr-1.5.2-1.el8.x86_64.rpm 1.5 MB/s | 34 kB 00:00 +(77/106): libXrender-0.9.10-7.el8.x86_64.rpm 1.4 MB/s | 33 kB 00:00 +(78/106): libXtst-1.2.3-7.el8.x86_64.rpm 957 kB/s | 22 kB 00:00 +(79/106): java-11-openjdk-headless-11.0.24.0.8- 71 MB/s | 42 MB 00:00 +(80/106): libdatrie-0.2.9-7.el8.x86_64.rpm 274 kB/s | 33 kB 00:00 +(81/106): libepoxy-1.5.8-1.el8.x86_64.rpm 9.1 MB/s | 225 kB 00:00 +(82/106): libfontenc-1.1.3-8.el8.x86_64.rpm 1.5 MB/s | 37 kB 00:00 +(83/106): libthai-0.1.27-2.el8.x86_64.rpm 8.2 MB/s | 203 kB 00:00 +(84/106): libjpeg-turbo-1.5.3-12.el8.x86_64.rpm 5.1 MB/s | 157 kB 00:00 +(85/106): libtiff-4.0.9-32.el8_10.x86_64.rpm 7.8 MB/s | 189 kB 00:00 +(86/106): libwayland-client-1.21.0-1.el8.x86_64 1.7 MB/s | 41 kB 00:00 +(87/106): libwayland-cursor-1.21.0-1.el8.x86_64 1.2 MB/s | 26 
kB 00:00 +(88/106): libwayland-egl-1.21.0-1.el8.x86_64.rp 801 kB/s | 19 kB 00:00 +(89/106): libxcb-1.13.1-1.el8.x86_64.rpm 9.7 MB/s | 231 kB 00:00 +(90/106): libxkbcommon-0.9.1-1.el8.x86_64.rpm 5.0 MB/s | 116 kB 00:00 +(91/106): nspr-4.35.0-1.el8_8.x86_64.rpm 6.0 MB/s | 143 kB 00:00 +(92/106): lua-5.3.4-12.el8.x86_64.rpm 5.9 MB/s | 192 kB 00:00 +(93/106): nss-softokn-3.90.0-7.el8_10.x86_64.rp 38 MB/s | 1.2 MB 00:00 +(94/106): nss-3.90.0-7.el8_10.x86_64.rpm 17 MB/s | 750 kB 00:00 +(95/106): nss-softokn-freebl-3.90.0-7.el8_10.x8 14 MB/s | 375 kB 00:00 +(96/106): nss-sysinit-3.90.0-7.el8_10.x86_64.rp 3.2 MB/s | 74 kB 00:00 +(97/106): nss-util-3.90.0-7.el8_10.x86_64.rpm 5.8 MB/s | 139 kB 00:00 +(98/106): pango-1.42.4-8.el8.x86_64.rpm 11 MB/s | 297 kB 00:00 +(99/106): pixman-0.38.4-4.el8.x86_64.rpm 10 MB/s | 256 kB 00:00 +(100/106): rest-0.8.1-2.el8.x86_64.rpm 3.1 MB/s | 70 kB 00:00 +(101/106): ttmkfdir-3.0.9-54.el8.x86_64.rpm 2.5 MB/s | 62 kB 00:00 +(102/106): tzdata-java-2024a-1.0.1.el8.noarch.r 7.4 MB/s | 186 kB 00:00 +(103/106): xkeyboard-config-2.28-1.el8.noarch.r 27 MB/s | 782 kB 00:00 +(104/106): xorg-x11-font-utils-7.5-41.el8.x86_6 3.9 MB/s | 104 kB 00:00 +(105/106): xorg-x11-fonts-Type1-7.5-19.el8.noar 1.3 MB/s | 522 kB 00:00 +(106/106): file-5.33-25.el8.x86_64.rpm 26 kB/s | 77 kB 00:02 -------------------------------------------------------------------------------- -Total 74 MB/s | 50 MB 00:00 +Total 27 MB/s | 86 MB 00:03 Running transaction check Transaction check succeeded. Running transaction test Transaction test succeeded. 
Running transaction Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 1/1 - Running scriptlet: java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86_6 1/1 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8.x86 1/1 Preparing : 1/1 - Installing : nspr-4.34.0-3.el8_6.x86_64 1/40 - Running scriptlet: nspr-4.34.0-3.el8_6.x86_64 1/40 - Installing : nss-util-3.79.0-10.el8_6.x86_64 2/40 - Installing : libjpeg-turbo-1.5.3-12.el8.x86_64 3/40 - Installing : nss-softokn-freebl-3.79.0-10.el8_6.x86_64 4/40 - Installing : nss-softokn-3.79.0-10.el8_6.x86_64 5/40 - Installing : tzdata-java-2022d-1.el8.noarch 6/40 - Installing : ttmkfdir-3.0.9-54.el8.x86_64 7/40 - Installing : lua-5.3.4-12.el8.x86_64 8/40 - Installing : copy-jdk-configs-4.0-2.el8.noarch 9/40 - Installing : libfontenc-1.1.3-8.el8.x86_64 10/40 - Installing : libXau-1.0.9-3.el8.x86_64 11/40 - Installing : libxcb-1.13.1-1.el8.x86_64 12/40 - Installing : libX11-common-1.6.8-5.el8.noarch 13/40 - Installing : libX11-1.6.8-5.el8.x86_64 14/40 - Installing : libXext-1.3.4-1.el8.x86_64 15/40 - Installing : libXi-1.7.10-1.el8.x86_64 16/40 - Installing : libXtst-1.2.3-7.el8.x86_64 17/40 - Installing : libXcomposite-0.4.4-14.el8.x86_64 18/40 - Installing : libXrender-0.9.10-7.el8.x86_64 19/40 - Installing : lcms2-2.9-2.el8.x86_64 20/40 - Running scriptlet: lcms2-2.9-2.el8.x86_64 20/40 - Installing : javapackages-filesystem-5.3.0-1.module+el8+5136+7f 21/40 - Installing : graphite2-1.3.10-10.el8.x86_64 22/40 - Installing : harfbuzz-1.7.5-3.el8.x86_64 23/40 - Running scriptlet: harfbuzz-1.7.5-3.el8.x86_64 23/40 - Installing : giflib-5.1.4-3.el8.x86_64 24/40 - Installing : alsa-lib-1.2.6.1-3.el8.x86_64 25/40 - Running scriptlet: alsa-lib-1.2.6.1-3.el8.x86_64 25/40 - Installing : pkgconf-m4-1.4.2-1.el8.noarch 26/40 - Installing : lksctp-tools-1.0.18-3.el8.x86_64 27/40 - Running scriptlet: lksctp-tools-1.0.18-3.el8.x86_64 27/40 - Installing : libpkgconf-1.4.2-1.el8.x86_64 28/40 - Installing : 
pkgconf-1.4.2-1.el8.x86_64 29/40 - Installing : pkgconf-pkg-config-1.4.2-1.el8.x86_64 30/40 - Installing : xorg-x11-font-utils-1:7.5-41.el8.x86_64 31/40 - Installing : xorg-x11-fonts-Type1-7.5-19.el8.noarch 32/40 - Running scriptlet: xorg-x11-fonts-Type1-7.5-19.el8.noarch 32/40 - Installing : crypto-policies-scripts-20211116-1.gitae470d6.el8. 33/40 - Installing : nss-sysinit-3.79.0-10.el8_6.x86_64 34/40 - Installing : nss-3.79.0-10.el8_6.x86_64 35/40 - Installing : avahi-libs-0.7-20.el8.x86_64 36/40 - Installing : cups-libs-1:2.2.6-45.el8_6.2.x86_64 37/40 - Installing : java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86 38/40 - Running scriptlet: java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86 38/40 - Installing : java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 39/40 - Running scriptlet: java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 39/40 - Installing : java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 40/40 - Running scriptlet: java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 40/40 - Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 40/40 - Running scriptlet: crypto-policies-scripts-20211116-1.gitae470d6.el8. 40/40 - Running scriptlet: nss-3.79.0-10.el8_6.x86_64 40/40 - Running scriptlet: java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86 40/40 - Running scriptlet: java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 40/40 - Running scriptlet: java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 40/40 - Verifying : avahi-libs-0.7-20.el8.x86_64 1/40 - Verifying : crypto-policies-scripts-20211116-1.gitae470d6.el8. 
2/40 - Verifying : cups-libs-1:2.2.6-45.el8_6.2.x86_64 3/40 - Verifying : libpkgconf-1.4.2-1.el8.x86_64 4/40 - Verifying : lksctp-tools-1.0.18-3.el8.x86_64 5/40 - Verifying : pkgconf-1.4.2-1.el8.x86_64 6/40 - Verifying : pkgconf-m4-1.4.2-1.el8.noarch 7/40 - Verifying : pkgconf-pkg-config-1.4.2-1.el8.x86_64 8/40 - Verifying : alsa-lib-1.2.6.1-3.el8.x86_64 9/40 - Verifying : copy-jdk-configs-4.0-2.el8.noarch 10/40 - Verifying : giflib-5.1.4-3.el8.x86_64 11/40 - Verifying : graphite2-1.3.10-10.el8.x86_64 12/40 - Verifying : harfbuzz-1.7.5-3.el8.x86_64 13/40 - Verifying : java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 14/40 - Verifying : java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 15/40 - Verifying : java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86 16/40 - Verifying : javapackages-filesystem-5.3.0-1.module+el8+5136+7f 17/40 - Verifying : lcms2-2.9-2.el8.x86_64 18/40 - Verifying : libX11-1.6.8-5.el8.x86_64 19/40 - Verifying : libX11-common-1.6.8-5.el8.noarch 20/40 - Verifying : libXau-1.0.9-3.el8.x86_64 21/40 - Verifying : libXcomposite-0.4.4-14.el8.x86_64 22/40 - Verifying : libXext-1.3.4-1.el8.x86_64 23/40 - Verifying : libXi-1.7.10-1.el8.x86_64 24/40 - Verifying : libXrender-0.9.10-7.el8.x86_64 25/40 - Verifying : libXtst-1.2.3-7.el8.x86_64 26/40 - Verifying : libfontenc-1.1.3-8.el8.x86_64 27/40 - Verifying : libjpeg-turbo-1.5.3-12.el8.x86_64 28/40 - Verifying : libxcb-1.13.1-1.el8.x86_64 29/40 - Verifying : lua-5.3.4-12.el8.x86_64 30/40 - Verifying : nspr-4.34.0-3.el8_6.x86_64 31/40 - Verifying : nss-3.79.0-10.el8_6.x86_64 32/40 - Verifying : nss-softokn-3.79.0-10.el8_6.x86_64 33/40 - Verifying : nss-softokn-freebl-3.79.0-10.el8_6.x86_64 34/40 - Verifying : nss-sysinit-3.79.0-10.el8_6.x86_64 35/40 - Verifying : nss-util-3.79.0-10.el8_6.x86_64 36/40 - Verifying : ttmkfdir-3.0.9-54.el8.x86_64 37/40 - Verifying : tzdata-java-2022d-1.el8.noarch 38/40 - Verifying : xorg-x11-font-utils-1:7.5-41.el8.x86_64 39/40 - Verifying : xorg-x11-fonts-Type1-7.5-19.el8.noarch 
40/40 + Installing : nspr-4.35.0-1.el8_8.x86_64 1/106 + Running scriptlet: nspr-4.35.0-1.el8_8.x86_64 1/106 + Installing : nss-util-3.90.0-7.el8_10.x86_64 2/106 + Installing : libjpeg-turbo-1.5.3-12.el8.x86_64 3/106 + Installing : pixman-0.38.4-4.el8.x86_64 4/106 + Installing : libwayland-client-1.21.0-1.el8.x86_64 5/106 + Installing : atk-2.28.1-1.el8.x86_64 6/106 + Installing : libgomp-8.5.0-22.0.1.el8_10.x86_64 7/106 + Running scriptlet: libgomp-8.5.0-22.0.1.el8_10.x86_64 7/106 + Installing : libcroco-0.6.12-4.el8_2.1.x86_64 8/106 + Running scriptlet: libcroco-0.6.12-4.el8_2.1.x86_64 8/106 + Installing : grub2-common-1:2.02-156.0.2.el8.noarch 9/106 + Installing : gettext-libs-0.19.8.1-17.el8.x86_64 10/106 + Installing : gettext-0.19.8.1-17.el8.x86_64 11/106 + Running scriptlet: gettext-0.19.8.1-17.el8.x86_64 11/106 + Installing : grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 12/106 + Installing : libwayland-cursor-1.21.0-1.el8.x86_64 13/106 + Installing : jasper-libs-2.0.14-5.el8.x86_64 14/106 + Installing : nss-softokn-freebl-3.90.0-7.el8_10.x86_64 15/106 + Installing : nss-softokn-3.90.0-7.el8_10.x86_64 16/106 + Installing : xkeyboard-config-2.28-1.el8.noarch 17/106 + Installing : libxkbcommon-0.9.1-1.el8.x86_64 18/106 + Installing : tzdata-java-2024a-1.0.1.el8.noarch 19/106 + Installing : ttmkfdir-3.0.9-54.el8.x86_64 20/106 + Installing : lua-5.3.4-12.el8.x86_64 21/106 + Installing : copy-jdk-configs-4.0-2.el8.noarch 22/106 + Installing : libwayland-egl-1.21.0-1.el8.x86_64 23/106 + Installing : libfontenc-1.1.3-8.el8.x86_64 24/106 + Installing : libepoxy-1.5.8-1.el8.x86_64 25/106 + Installing : libdatrie-0.2.9-7.el8.x86_64 26/106 + Running scriptlet: libdatrie-0.2.9-7.el8.x86_64 26/106 + Installing : libthai-0.1.27-2.el8.x86_64 27/106 + Running scriptlet: libthai-0.1.27-2.el8.x86_64 27/106 + Installing : libXau-1.0.9-3.el8.x86_64 28/106 + Installing : libxcb-1.13.1-1.el8.x86_64 29/106 + Installing : libX11-common-1.6.8-8.el8.noarch 30/106 + Installing : 
libX11-1.6.8-8.el8.x86_64 31/106 + Installing : libXext-1.3.4-1.el8.x86_64 32/106 + Installing : libXrender-0.9.10-7.el8.x86_64 33/106 + Installing : cairo-1.15.12-6.el8.x86_64 34/106 + Installing : libXi-1.7.10-1.el8.x86_64 35/106 + Installing : libXfixes-5.0.3-7.el8.x86_64 36/106 + Installing : libXtst-1.2.3-7.el8.x86_64 37/106 + Installing : libXcomposite-0.4.4-14.el8.x86_64 38/106 + Installing : at-spi2-core-2.28.0-1.el8.x86_64 39/106 + Running scriptlet: at-spi2-core-2.28.0-1.el8.x86_64 39/106 + Installing : at-spi2-atk-2.26.2-1.el8.x86_64 40/106 + Running scriptlet: at-spi2-atk-2.26.2-1.el8.x86_64 40/106 + Installing : libXcursor-1.1.15-3.el8.x86_64 41/106 + Installing : libXdamage-1.1.4-14.el8.x86_64 42/106 + Installing : cairo-gobject-1.15.12-6.el8.x86_64 43/106 + Installing : libXft-2.3.3-1.el8.x86_64 44/106 + Installing : libXrandr-1.5.2-1.el8.x86_64 45/106 + Installing : libXinerama-1.1.4-1.el8.x86_64 46/106 + Installing : lcms2-2.9-2.el8.x86_64 47/106 + Running scriptlet: lcms2-2.9-2.el8.x86_64 47/106 + Installing : jbigkit-libs-2.1-14.el8.x86_64 48/106 + Running scriptlet: jbigkit-libs-2.1-14.el8.x86_64 48/106 + Installing : libtiff-4.0.9-32.el8_10.x86_64 49/106 + Installing : javapackages-filesystem-5.3.0-1.module+el8+5136+ 50/106 + Installing : hicolor-icon-theme-0.17-2.el8.noarch 51/106 + Installing : graphite2-1.3.10-10.el8.x86_64 52/106 + Installing : harfbuzz-1.7.5-4.el8.x86_64 53/106 + Running scriptlet: harfbuzz-1.7.5-4.el8.x86_64 53/106 + Installing : fribidi-1.0.4-9.el8.x86_64 54/106 + Installing : pango-1.42.4-8.el8.x86_64 55/106 + Running scriptlet: pango-1.42.4-8.el8.x86_64 55/106 + Installing : dconf-0.28.0-4.0.1.el8.x86_64 56/106 + Installing : alsa-lib-1.2.10-2.el8.x86_64 57/106 + Running scriptlet: alsa-lib-1.2.10-2.el8.x86_64 57/106 + Installing : adwaita-cursor-theme-3.28.0-3.el8.noarch 58/106 + Installing : adwaita-icon-theme-3.28.0-3.el8.noarch 59/106 + Installing : abattis-cantarell-fonts-0.0.25-6.el8.noarch 60/106 + Installing : 
xz-5.2.4-4.el8_6.x86_64 61/106 + Installing : shared-mime-info-1.9-4.el8.x86_64 62/106 + Running scriptlet: shared-mime-info-1.9-4.el8.x86_64 62/106 + Installing : gdk-pixbuf2-2.36.12-6.el8_10.x86_64 63/106 + Running scriptlet: gdk-pixbuf2-2.36.12-6.el8_10.x86_64 63/106 + Installing : gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 64/106 + Installing : gtk-update-icon-cache-3.22.30-11.el8.x86_64 65/106 + Installing : pkgconf-m4-1.4.2-1.el8.noarch 66/106 + Installing : pigz-2.4-4.el8.x86_64 67/106 + Installing : memstrack-0.2.5-2.el8.x86_64 68/106 + Installing : lksctp-tools-1.0.18-3.el8.x86_64 69/106 + Running scriptlet: lksctp-tools-1.0.18-3.el8.x86_64 69/106 + Installing : libpkgconf-1.4.2-1.el8.x86_64 70/106 + Installing : pkgconf-1.4.2-1.el8.x86_64 71/106 + Installing : pkgconf-pkg-config-1.4.2-1.el8.x86_64 72/106 + Installing : xorg-x11-font-utils-1:7.5-41.el8.x86_64 73/106 + Installing : xorg-x11-fonts-Type1-7.5-19.el8.noarch 74/106 + Running scriptlet: xorg-x11-fonts-Type1-7.5-19.el8.noarch 74/106 + Installing : libmodman-2.0.1-17.el8.x86_64 75/106 + Running scriptlet: libmodman-2.0.1-17.el8.x86_64 75/106 + Installing : libproxy-0.4.15-5.2.el8.x86_64 76/106 + Running scriptlet: libproxy-0.4.15-5.2.el8.x86_64 76/106 + Installing : libkcapi-1.4.0-2.0.1.el8.x86_64 77/106 + Installing : libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 78/106 + Installing : libgusb-0.3.0-1.el8.x86_64 79/106 + Installing : colord-libs-1.4.2-1.el8.x86_64 80/106 + Installing : kbd-misc-2.0.4-11.el8.noarch 81/106 + Installing : kbd-legacy-2.0.4-11.el8.noarch 82/106 + Installing : kbd-2.0.4-11.el8.x86_64 83/106 + Installing : systemd-udev-239-78.0.4.el8.x86_64 84/106 + Running scriptlet: systemd-udev-239-78.0.4.el8.x86_64 84/106 + Installing : os-prober-1.74-9.0.1.el8.x86_64 85/106 + Installing : json-glib-1.4.4-1.el8.x86_64 86/106 + Installing : hardlink-1:1.3-6.el8.x86_64 87/106 + Installing : file-5.33-25.el8.x86_64 88/106 + Installing : dejavu-sans-mono-fonts-2.35-7.el8.noarch 89/106 + 
Installing : gsettings-desktop-schemas-3.32.0-6.el8.x86_64 90/106 + Installing : glib-networking-2.56.1-1.1.el8.x86_64 91/106 + Installing : libsoup-2.62.3-5.el8.x86_64 92/106 + Installing : rest-0.8.1-2.el8.x86_64 93/106 + Running scriptlet: rest-0.8.1-2.el8.x86_64 93/106 + Installing : cpio-2.12-11.el8.x86_64 94/106 + Installing : dracut-049-233.git20240115.0.1.el8.x86_64 95/106 + Running scriptlet: grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Installing : grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Running scriptlet: grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Installing : grubby-8.40-49.0.2.el8.x86_64 97/106 + Installing : crypto-policies-scripts-20230731-1.git3177e06.el 98/106 + Installing : nss-sysinit-3.90.0-7.el8_10.x86_64 99/106 + Installing : nss-3.90.0-7.el8_10.x86_64 100/106 + Installing : avahi-libs-0.7-27.el8.x86_64 101/106 + Installing : cups-libs-1:2.2.6-60.el8_10.x86_64 102/106 + Installing : java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 103/106 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 103/106 + Installing : gtk3-3.22.30-11.el8.x86_64 104/106 + Installing : java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 105/106 + Running scriptlet: java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 105/106 + Installing : java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 106/106 + Running scriptlet: dconf-0.28.0-4.0.1.el8.x86_64 106/106 + Running scriptlet: crypto-policies-scripts-20230731-1.git3177e06.el 106/106 + Running scriptlet: nss-3.90.0-7.el8_10.x86_64 106/106 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 106/106 + Running scriptlet: java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 106/106 + Running scriptlet: java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: hicolor-icon-theme-0.17-2.el8.noarch 106/106 + Running scriptlet: 
adwaita-icon-theme-3.28.0-3.el8.noarch 106/106 + Running scriptlet: shared-mime-info-1.9-4.el8.x86_64 106/106 + Running scriptlet: gdk-pixbuf2-2.36.12-6.el8_10.x86_64 106/106 + Running scriptlet: systemd-udev-239-78.0.4.el8.x86_64 106/106 + Verifying : avahi-libs-0.7-27.el8.x86_64 1/106 + Verifying : cpio-2.12-11.el8.x86_64 2/106 + Verifying : crypto-policies-scripts-20230731-1.git3177e06.el 3/106 + Verifying : cups-libs-1:2.2.6-60.el8_10.x86_64 4/106 + Verifying : dejavu-sans-mono-fonts-2.35-7.el8.noarch 5/106 + Verifying : dracut-049-233.git20240115.0.1.el8.x86_64 6/106 + Verifying : file-5.33-25.el8.x86_64 7/106 + Verifying : gdk-pixbuf2-2.36.12-6.el8_10.x86_64 8/106 + Verifying : gettext-0.19.8.1-17.el8.x86_64 9/106 + Verifying : gettext-libs-0.19.8.1-17.el8.x86_64 10/106 + Verifying : glib-networking-2.56.1-1.1.el8.x86_64 11/106 + Verifying : grub2-common-1:2.02-156.0.2.el8.noarch 12/106 + Verifying : grub2-tools-1:2.02-156.0.2.el8.x86_64 13/106 + Verifying : grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 14/106 + Verifying : grubby-8.40-49.0.2.el8.x86_64 15/106 + Verifying : gsettings-desktop-schemas-3.32.0-6.el8.x86_64 16/106 + Verifying : hardlink-1:1.3-6.el8.x86_64 17/106 + Verifying : json-glib-1.4.4-1.el8.x86_64 18/106 + Verifying : kbd-2.0.4-11.el8.x86_64 19/106 + Verifying : kbd-legacy-2.0.4-11.el8.noarch 20/106 + Verifying : kbd-misc-2.0.4-11.el8.noarch 21/106 + Verifying : libcroco-0.6.12-4.el8_2.1.x86_64 22/106 + Verifying : libgomp-8.5.0-22.0.1.el8_10.x86_64 23/106 + Verifying : libgusb-0.3.0-1.el8.x86_64 24/106 + Verifying : libkcapi-1.4.0-2.0.1.el8.x86_64 25/106 + Verifying : libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 26/106 + Verifying : libmodman-2.0.1-17.el8.x86_64 27/106 + Verifying : libpkgconf-1.4.2-1.el8.x86_64 28/106 + Verifying : libproxy-0.4.15-5.2.el8.x86_64 29/106 + Verifying : libsoup-2.62.3-5.el8.x86_64 30/106 + Verifying : lksctp-tools-1.0.18-3.el8.x86_64 31/106 + Verifying : memstrack-0.2.5-2.el8.x86_64 32/106 + Verifying : 
os-prober-1.74-9.0.1.el8.x86_64 33/106 + Verifying : pigz-2.4-4.el8.x86_64 34/106 + Verifying : pkgconf-1.4.2-1.el8.x86_64 35/106 + Verifying : pkgconf-m4-1.4.2-1.el8.noarch 36/106 + Verifying : pkgconf-pkg-config-1.4.2-1.el8.x86_64 37/106 + Verifying : shared-mime-info-1.9-4.el8.x86_64 38/106 + Verifying : systemd-udev-239-78.0.4.el8.x86_64 39/106 + Verifying : xz-5.2.4-4.el8_6.x86_64 40/106 + Verifying : abattis-cantarell-fonts-0.0.25-6.el8.noarch 41/106 + Verifying : adwaita-cursor-theme-3.28.0-3.el8.noarch 42/106 + Verifying : adwaita-icon-theme-3.28.0-3.el8.noarch 43/106 + Verifying : alsa-lib-1.2.10-2.el8.x86_64 44/106 + Verifying : at-spi2-atk-2.26.2-1.el8.x86_64 45/106 + Verifying : at-spi2-core-2.28.0-1.el8.x86_64 46/106 + Verifying : atk-2.28.1-1.el8.x86_64 47/106 + Verifying : cairo-1.15.12-6.el8.x86_64 48/106 + Verifying : cairo-gobject-1.15.12-6.el8.x86_64 49/106 + Verifying : colord-libs-1.4.2-1.el8.x86_64 50/106 + Verifying : copy-jdk-configs-4.0-2.el8.noarch 51/106 + Verifying : dconf-0.28.0-4.0.1.el8.x86_64 52/106 + Verifying : fribidi-1.0.4-9.el8.x86_64 53/106 + Verifying : gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 54/106 + Verifying : graphite2-1.3.10-10.el8.x86_64 55/106 + Verifying : gtk-update-icon-cache-3.22.30-11.el8.x86_64 56/106 + Verifying : gtk3-3.22.30-11.el8.x86_64 57/106 + Verifying : harfbuzz-1.7.5-4.el8.x86_64 58/106 + Verifying : hicolor-icon-theme-0.17-2.el8.noarch 59/106 + Verifying : jasper-libs-2.0.14-5.el8.x86_64 60/106 + Verifying : java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 61/106 + Verifying : java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 62/106 + Verifying : java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 63/106 + Verifying : javapackages-filesystem-5.3.0-1.module+el8+5136+ 64/106 + Verifying : jbigkit-libs-2.1-14.el8.x86_64 65/106 + Verifying : lcms2-2.9-2.el8.x86_64 66/106 + Verifying : libX11-1.6.8-8.el8.x86_64 67/106 + Verifying : libX11-common-1.6.8-8.el8.noarch 68/106 + Verifying : libXau-1.0.9-3.el8.x86_64 
69/106 + Verifying : libXcomposite-0.4.4-14.el8.x86_64 70/106 + Verifying : libXcursor-1.1.15-3.el8.x86_64 71/106 + Verifying : libXdamage-1.1.4-14.el8.x86_64 72/106 + Verifying : libXext-1.3.4-1.el8.x86_64 73/106 + Verifying : libXfixes-5.0.3-7.el8.x86_64 74/106 + Verifying : libXft-2.3.3-1.el8.x86_64 75/106 + Verifying : libXi-1.7.10-1.el8.x86_64 76/106 + Verifying : libXinerama-1.1.4-1.el8.x86_64 77/106 + Verifying : libXrandr-1.5.2-1.el8.x86_64 78/106 + Verifying : libXrender-0.9.10-7.el8.x86_64 79/106 + Verifying : libXtst-1.2.3-7.el8.x86_64 80/106 + Verifying : libdatrie-0.2.9-7.el8.x86_64 81/106 + Verifying : libepoxy-1.5.8-1.el8.x86_64 82/106 + Verifying : libfontenc-1.1.3-8.el8.x86_64 83/106 + Verifying : libjpeg-turbo-1.5.3-12.el8.x86_64 84/106 + Verifying : libthai-0.1.27-2.el8.x86_64 85/106 + Verifying : libtiff-4.0.9-32.el8_10.x86_64 86/106 + Verifying : libwayland-client-1.21.0-1.el8.x86_64 87/106 + Verifying : libwayland-cursor-1.21.0-1.el8.x86_64 88/106 + Verifying : libwayland-egl-1.21.0-1.el8.x86_64 89/106 + Verifying : libxcb-1.13.1-1.el8.x86_64 90/106 + Verifying : libxkbcommon-0.9.1-1.el8.x86_64 91/106 + Verifying : lua-5.3.4-12.el8.x86_64 92/106 + Verifying : nspr-4.35.0-1.el8_8.x86_64 93/106 + Verifying : nss-3.90.0-7.el8_10.x86_64 94/106 + Verifying : nss-softokn-3.90.0-7.el8_10.x86_64 95/106 + Verifying : nss-softokn-freebl-3.90.0-7.el8_10.x86_64 96/106 + Verifying : nss-sysinit-3.90.0-7.el8_10.x86_64 97/106 + Verifying : nss-util-3.90.0-7.el8_10.x86_64 98/106 + Verifying : pango-1.42.4-8.el8.x86_64 99/106 + Verifying : pixman-0.38.4-4.el8.x86_64 100/106 + Verifying : rest-0.8.1-2.el8.x86_64 101/106 + Verifying : ttmkfdir-3.0.9-54.el8.x86_64 102/106 + Verifying : tzdata-java-2024a-1.0.1.el8.noarch 103/106 + Verifying : xkeyboard-config-2.28-1.el8.noarch 104/106 + Verifying : xorg-x11-font-utils-1:7.5-41.el8.x86_64 105/106 + Verifying : xorg-x11-fonts-Type1-7.5-19.el8.noarch 106/106 Installed: - alsa-lib-1.2.6.1-3.el8.x86_64 - 
avahi-libs-0.7-20.el8.x86_64 + abattis-cantarell-fonts-0.0.25-6.el8.noarch + adwaita-cursor-theme-3.28.0-3.el8.noarch + adwaita-icon-theme-3.28.0-3.el8.noarch + alsa-lib-1.2.10-2.el8.x86_64 + at-spi2-atk-2.26.2-1.el8.x86_64 + at-spi2-core-2.28.0-1.el8.x86_64 + atk-2.28.1-1.el8.x86_64 + avahi-libs-0.7-27.el8.x86_64 + cairo-1.15.12-6.el8.x86_64 + cairo-gobject-1.15.12-6.el8.x86_64 + colord-libs-1.4.2-1.el8.x86_64 copy-jdk-configs-4.0-2.el8.noarch - crypto-policies-scripts-20211116-1.gitae470d6.el8.noarch - cups-libs-1:2.2.6-45.el8_6.2.x86_64 - giflib-5.1.4-3.el8.x86_64 + cpio-2.12-11.el8.x86_64 + crypto-policies-scripts-20230731-1.git3177e06.el8.noarch + cups-libs-1:2.2.6-60.el8_10.x86_64 + dconf-0.28.0-4.0.1.el8.x86_64 + dejavu-sans-mono-fonts-2.35-7.el8.noarch + dracut-049-233.git20240115.0.1.el8.x86_64 + file-5.33-25.el8.x86_64 + fribidi-1.0.4-9.el8.x86_64 + gdk-pixbuf2-2.36.12-6.el8_10.x86_64 + gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 + gettext-0.19.8.1-17.el8.x86_64 + gettext-libs-0.19.8.1-17.el8.x86_64 + glib-networking-2.56.1-1.1.el8.x86_64 graphite2-1.3.10-10.el8.x86_64 - harfbuzz-1.7.5-3.el8.x86_64 - java-11-openjdk-1:11.0.16.1.1-1.el8_6.x86_64 - java-11-openjdk-devel-1:11.0.16.1.1-1.el8_6.x86_64 - java-11-openjdk-headless-1:11.0.16.1.1-1.el8_6.x86_64 + grub2-common-1:2.02-156.0.2.el8.noarch + grub2-tools-1:2.02-156.0.2.el8.x86_64 + grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 + grubby-8.40-49.0.2.el8.x86_64 + gsettings-desktop-schemas-3.32.0-6.el8.x86_64 + gtk-update-icon-cache-3.22.30-11.el8.x86_64 + gtk3-3.22.30-11.el8.x86_64 + hardlink-1:1.3-6.el8.x86_64 + harfbuzz-1.7.5-4.el8.x86_64 + hicolor-icon-theme-0.17-2.el8.noarch + jasper-libs-2.0.14-5.el8.x86_64 + java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 + java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x86_64 + java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8.x86_64 javapackages-filesystem-5.3.0-1.module+el8+5136+7ff78f74.noarch + jbigkit-libs-2.1-14.el8.x86_64 + json-glib-1.4.4-1.el8.x86_64 + 
kbd-2.0.4-11.el8.x86_64 + kbd-legacy-2.0.4-11.el8.noarch + kbd-misc-2.0.4-11.el8.noarch lcms2-2.9-2.el8.x86_64 - libX11-1.6.8-5.el8.x86_64 - libX11-common-1.6.8-5.el8.noarch + libX11-1.6.8-8.el8.x86_64 + libX11-common-1.6.8-8.el8.noarch libXau-1.0.9-3.el8.x86_64 libXcomposite-0.4.4-14.el8.x86_64 + libXcursor-1.1.15-3.el8.x86_64 + libXdamage-1.1.4-14.el8.x86_64 libXext-1.3.4-1.el8.x86_64 + libXfixes-5.0.3-7.el8.x86_64 + libXft-2.3.3-1.el8.x86_64 libXi-1.7.10-1.el8.x86_64 + libXinerama-1.1.4-1.el8.x86_64 + libXrandr-1.5.2-1.el8.x86_64 libXrender-0.9.10-7.el8.x86_64 libXtst-1.2.3-7.el8.x86_64 + libcroco-0.6.12-4.el8_2.1.x86_64 + libdatrie-0.2.9-7.el8.x86_64 + libepoxy-1.5.8-1.el8.x86_64 libfontenc-1.1.3-8.el8.x86_64 + libgomp-8.5.0-22.0.1.el8_10.x86_64 + libgusb-0.3.0-1.el8.x86_64 libjpeg-turbo-1.5.3-12.el8.x86_64 + libkcapi-1.4.0-2.0.1.el8.x86_64 + libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 + libmodman-2.0.1-17.el8.x86_64 libpkgconf-1.4.2-1.el8.x86_64 + libproxy-0.4.15-5.2.el8.x86_64 + libsoup-2.62.3-5.el8.x86_64 + libthai-0.1.27-2.el8.x86_64 + libtiff-4.0.9-32.el8_10.x86_64 + libwayland-client-1.21.0-1.el8.x86_64 + libwayland-cursor-1.21.0-1.el8.x86_64 + libwayland-egl-1.21.0-1.el8.x86_64 libxcb-1.13.1-1.el8.x86_64 + libxkbcommon-0.9.1-1.el8.x86_64 lksctp-tools-1.0.18-3.el8.x86_64 lua-5.3.4-12.el8.x86_64 - nspr-4.34.0-3.el8_6.x86_64 - nss-3.79.0-10.el8_6.x86_64 - nss-softokn-3.79.0-10.el8_6.x86_64 - nss-softokn-freebl-3.79.0-10.el8_6.x86_64 - nss-sysinit-3.79.0-10.el8_6.x86_64 - nss-util-3.79.0-10.el8_6.x86_64 + memstrack-0.2.5-2.el8.x86_64 + nspr-4.35.0-1.el8_8.x86_64 + nss-3.90.0-7.el8_10.x86_64 + nss-softokn-3.90.0-7.el8_10.x86_64 + nss-softokn-freebl-3.90.0-7.el8_10.x86_64 + nss-sysinit-3.90.0-7.el8_10.x86_64 + nss-util-3.90.0-7.el8_10.x86_64 + os-prober-1.74-9.0.1.el8.x86_64 + pango-1.42.4-8.el8.x86_64 + pigz-2.4-4.el8.x86_64 + pixman-0.38.4-4.el8.x86_64 pkgconf-1.4.2-1.el8.x86_64 pkgconf-m4-1.4.2-1.el8.noarch pkgconf-pkg-config-1.4.2-1.el8.x86_64 + 
rest-0.8.1-2.el8.x86_64 + shared-mime-info-1.9-4.el8.x86_64 + systemd-udev-239-78.0.4.el8.x86_64 ttmkfdir-3.0.9-54.el8.x86_64 - tzdata-java-2022d-1.el8.noarch + tzdata-java-2024a-1.0.1.el8.noarch + xkeyboard-config-2.28-1.el8.noarch xorg-x11-font-utils-1:7.5-41.el8.x86_64 xorg-x11-fonts-Type1-7.5-19.el8.noarch + xz-5.2.4-4.el8_6.x86_64 Complete! -Last metadata expiration check: 0:00:10 ago on Mon 10 Oct 2022 04:06:28 PM UTC. +Last metadata expiration check: 0:00:23 ago on Tue 20 Aug 2024 08:55:14 AM UTC. +Package iproute-6.2.0-5.el8_9.x86_64 is already installed. Dependencies resolved. -============================================================================================== - Package - Arch Version Repository Size -============================================================================================== -Installing: - ords noarch 22.3.0-7.el8 yum.oracle.com_repo_OracleLinux_OL8_oracle_software_x86_64 87 M -Installing dependencies: - lsof x86_64 4.93.2-1.el8 ol8_baseos_latest 253 k +================================================================================ + Package Architecture Version Repository Size +================================================================================ +Upgrading: + iproute x86_64 6.2.0-6.el8_10 ol8_baseos_latest 853 k Transaction Summary -============================================================================================== -Install 2 Packages +================================================================================ +Upgrade 1 Package -Total download size: 87 M -Installed size: 92 M +Total download size: 853 k Downloading Packages: -(1/2): lsof-4.93.2-1.el8.x86_64.rpm 3.0 MB/s | 253 kB 00:00 -(2/2): ords-22.3.0-7.el8.noarch.rpm 66 MB/s | 87 MB 00:01 +iproute-6.2.0-6.el8_10.x86_64.rpm 4.2 MB/s | 853 kB 00:00 -------------------------------------------------------------------------------- -Total 66 MB/s | 87 MB 00:01 +Total 4.2 MB/s | 853 kB 00:00 Running transaction check Transaction check succeeded. 
Running transaction test Transaction test succeeded. Running transaction Preparing : 1/1 - Installing : lsof-4.93.2-1.el8.x86_64 1/2 - Running scriptlet: ords-22.3.0-7.el8.noarch 2/2 - Installing : ords-22.3.0-7.el8.noarch 2/2 - Running scriptlet: ords-22.3.0-7.el8.noarch 2/2 -INFO: Before starting ORDS service, run the below command as user oracle: - ords --config /etc/ords/config install + Upgrading : iproute-6.2.0-6.el8_10.x86_64 1/2 + Cleanup : iproute-6.2.0-5.el8_9.x86_64 2/2 + Running scriptlet: iproute-6.2.0-5.el8_9.x86_64 2/2 + Verifying : iproute-6.2.0-6.el8_10.x86_64 1/2 + Verifying : iproute-6.2.0-5.el8_9.x86_64 2/2 - Verifying : lsof-4.93.2-1.el8.x86_64 1/2 - Verifying : ords-22.3.0-7.el8.noarch 2/2 - -Installed: - lsof-4.93.2-1.el8.x86_64 ords-22.3.0-7.el8.noarch +Upgraded: + iproute-6.2.0-6.el8_10.x86_64 -Complete! -Last metadata expiration check: 0:00:15 ago on Mon 10 Oct 2022 04:06:28 PM UTC. -Package iproute-5.15.0-4.el8_6.1.x86_64 is already installed. -Dependencies resolved. -Nothing to do. Complete! 
24 files removed -Removing intermediate container d38a69d2cc70 - ---> 3a7b8edb327e -Step 5/10 : RUN mkdir -p $ORDS_HOME/doc_root && mkdir -p $ORDS_HOME/error && mkdir -p $ORDS_HOME/secrets && chmod ug+x $ORDS_HOME/*.sh && groupadd -g 54322 dba && usermod -u 54321 -d /home/oracle -g dba -m -s /bin/bash oracle && chown -R oracle:dba $ORDS_HOME && echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers - ---> Running in 1d05951f8252 -Removing intermediate container 1d05951f8252 - ---> 265cb7ab4f2c -Step 6/10 : USER oracle - ---> Running in 180d432ae42d -Removing intermediate container 180d432ae42d - ---> a9caee3d9426 -Step 7/10 : WORKDIR /home/oracle - ---> Running in bf8ac95c724a -Removing intermediate container bf8ac95c724a - ---> 4623d696e603 -Step 8/10 : VOLUME ["$ORDS_HOME/config/ords"] - ---> Running in 3afce627e4c0 -Removing intermediate container 3afce627e4c0 - ---> 914d4ee42ede -Step 9/10 : EXPOSE 8888 - ---> Running in 13460b132c52 -Removing intermediate container 13460b132c52 - ---> 4c9edba5aade -Step 10/10 : CMD $ORDS_HOME/$RUN_FILE - ---> Running in f97b17d8cea4 -Removing intermediate container f97b17d8cea4 - ---> c8e95aadf5e3 -Successfully built c8e95aadf5e3 +Removing intermediate container fe168b01f3ad + ---> 791878694a50 +Step 5/12 : RUN curl -o /tmp/ords-$ORDSVERSION.el8.noarch.rpm https://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64/getPackage/ords-$ORDSVERSION.el8.noarch.rpm + ---> Running in 59d7143da358 + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 108M 100 108M 0 0 1440k 0 0:01:16 0:01:16 --:--:-- 1578k +Removing intermediate container 59d7143da358 + ---> 17c4534293e5 +Step 6/12 : RUN rpm -ivh /tmp/ords-$ORDSVERSION.el8.noarch.rpm + ---> Running in 84b1cbffdc51 +Verifying... ######################################## +Preparing... ######################################## +Updating / installing... 
+ords-23.4.0-8.el8 ######################################## +INFO: Before starting ORDS service, run the below command as user oracle: + ords --config /etc/ords/config install +Removing intermediate container 84b1cbffdc51 + ---> 6e7151b79588 +Step 7/12 : RUN mkdir -p $ORDS_HOME/doc_root && mkdir -p $ORDS_HOME/error && mkdir -p $ORDS_HOME/secrets && chmod ug+x $ORDS_HOME/*.sh && groupadd -g 54322 dba && usermod -u 54321 -d /home/oracle -g dba -m -s /bin/bash oracle && chown -R oracle:dba $ORDS_HOME && echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + ---> Running in 66e5db5f343f +Removing intermediate container 66e5db5f343f + ---> 0523dc897bf4 +Step 8/12 : USER oracle + ---> Running in ffda8495ac77 +Removing intermediate container ffda8495ac77 + ---> 162acd4d0b93 +Step 9/12 : WORKDIR /home/oracle + ---> Running in 8c14310ffbc7 +Removing intermediate container 8c14310ffbc7 + ---> c8dae809e772 +Step 10/12 : VOLUME ["$ORDS_HOME/config/ords"] + ---> Running in ed64548fd997 +Removing intermediate container ed64548fd997 + ---> 22e2c99247b0 +Step 11/12 : EXPOSE 8888 + ---> Running in 921f7c85d61d +Removing intermediate container 921f7c85d61d + ---> e5d503c92224 +Step 12/12 : CMD $ORDS_HOME/$RUN_FILE + ---> Running in cad487298d63 +Removing intermediate container cad487298d63 + ---> fdb17aa242f8 +Successfully built fdb17aa242f8 Successfully tagged oracle/ords-dboper:latest +08:57:18 oracle@mitk01:# diff --git a/docs/multitenant/usecase01/logfiles/cdb_creation.log b/docs/multitenant/usecase01/logfiles/cdb_creation.log new file mode 100644 index 00000000..b4602f54 --- /dev/null +++ b/docs/multitenant/usecase01/logfiles/cdb_creation.log @@ -0,0 +1,357 @@ +/usr/local/go/bin/kubectl logs -f `/usr/local/go/bin/kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ORDSVERSIN:23.4.0-8 +NOT_INSTALLED=2 + SETUP +==================================================== +CONFIG=/etc/ords/config +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:16 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.connectionType was set to: customurl in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:18 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.customURL was set to: jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:20 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: security.requestValidationFunction was set to: false in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:22 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: jdbc.MaxLimit was set to: 100 in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:24 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: jdbc.InitialLimit was set to: 50 in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:25 2024 + +Copyright (c) 2010, 2024, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The global setting named: error.externalPath was set to: /opt/oracle/ords/error +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:27 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.access.log was set to: /home/oracle +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:29 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.port was set to: 8888 +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:31 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.cert was set to: /opt/oracle/ords//secrets/tls.crt +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:33 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: standalone.https.cert.key was set to: /opt/oracle/ords//secrets/tls.key +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:35 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: restEnabledSql.active was set to: true in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:37 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: security.verifySSL was set to: true +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:39 2024 + +Copyright (c) 2010, 2024, Oracle. 
+ +Configuration: + /etc/ords/config/ + +The global setting named: database.api.enabled was set to: true +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:41 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: plsql.gateway.mode was set to: disabled in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:43 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The global setting named: database.api.management.services.disabled was set to: false +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:45 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: misc.pagination.maxRows was set to: 1000 in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:47 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.cdb.adminUser was set to: C##DBAPI_CDB_ADMIN AS SYSDBA in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:49 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +The setting named: db.cdb.adminUser.password was set to: ****** in configuration: default +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:51 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +Created user welcome in file /etc/ords/config/global/credentials +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:53 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +Oracle REST Data Services - Non-Interactive Install + +Retrieving information... 
+Completed verifying Oracle REST Data Services schema version 23.4.0.r3461619. +Connecting to database user: ORDS_PUBLIC_USER url: jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +The setting named: db.serviceNameSuffix was set to: in configuration: default +The setting named: db.username was set to: ORDS_PUBLIC_USER in configuration: default +The setting named: db.password was set to: ****** in configuration: default +The setting named: security.requestValidationFunction was set to: ords_util.authorize_plsql_gateway in configuration: default +2024-08-20T07:21:57.563Z INFO Oracle REST Data Services schema version 23.4.0.r3461619 is installed. +2024-08-20T07:21:57.565Z INFO To run in standalone mode, use the ords serve command: +2024-08-20T07:21:57.565Z INFO ords --config /etc/ords/config serve +2024-08-20T07:21:57.565Z INFO Visit the ORDS Documentation to access tutorials, developer guides and more to help you get started with the new ORDS Command Line Interface (http://oracle.com/rest). +Picked up _JAVA_OPTIONS: -Xms1126M -Xmx1126M + +ORDS: Release 23.4 Production on Tue Aug 20 07:21:59 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +2024-08-20T07:21:59.739Z INFO HTTP and HTTP/2 cleartext listening on host: 0.0.0.0 port: 8080 +2024-08-20T07:21:59.741Z INFO HTTPS and HTTPS/2 listening on host: 0.0.0.0 port: 8888 +2024-08-20T07:21:59.765Z INFO Disabling document root because the specified folder does not exist: /etc/ords/config/global/doc_root +2024-08-20T07:21:59.765Z INFO Default forwarding from / to contextRoot configured. 
+2024-08-20T07:22:05.313Z INFO Configuration properties for: |default|lo| +db.serviceNameSuffix= +java.specification.version=22 +conf.use.wallet=true +database.api.management.services.disabled=false +sun.jnu.encoding=UTF-8 +user.region=US +java.class.path=/opt/oracle/ords/ords.war +java.vm.vendor=Oracle Corporation +standalone.https.cert.key=/opt/oracle/ords//secrets/tls.key +sun.arch.data.model=64 +nashorn.args=--no-deprecation-warning +java.vendor.url=https://java.oracle.com/ +resource.templates.enabled=false +user.timezone=UTC +java.vm.specification.version=22 +os.name=Linux +sun.java.launcher=SUN_STANDARD +user.country=US +sun.boot.library.path=/usr/java/jdk-22/lib +sun.java.command=/opt/oracle/ords/ords.war --config /etc/ords/config serve --port 8888 --secure +jdk.debug=release +sun.cpu.endian=little +user.home=/home/oracle +oracle.dbtools.launcher.executable.jar.path=/opt/oracle/ords/ords.war +user.language=en +db.cdb.adminUser.password=****** +java.specification.vendor=Oracle Corporation +java.version.date=2024-07-16 +database.api.enabled=true +java.home=/usr/java/jdk-22 +db.username=ORDS_PUBLIC_USER +file.separator=/ +java.vm.compressedOopsMode=32-bit +line.separator= + +restEnabledSql.active=true +java.specification.name=Java Platform API Specification +java.vm.specification.vendor=Oracle Corporation +java.awt.headless=true +standalone.https.cert=/opt/oracle/ords//secrets/tls.crt +db.password=****** +sun.management.compiler=HotSpot 64-Bit Tiered Compilers +security.requestValidationFunction=ords_util.authorize_plsql_gateway +misc.pagination.maxRows=1000 +java.runtime.version=22.0.2+9-70 +user.name=oracle +error.externalPath=/opt/oracle/ords/error +stdout.encoding=UTF-8 +path.separator=: +db.cdb.adminUser=C##DBAPI_CDB_ADMIN AS SYSDBA +os.version=5.4.17-2136.329.3.1.el7uek.x86_64 +java.runtime.name=Java(TM) SE Runtime Environment +file.encoding=UTF-8 +plsql.gateway.mode=disabled +security.verifySSL=true +standalone.https.port=8888 +java.vm.name=Java 
HotSpot(TM) 64-Bit Server VM +java.vendor.url.bug=https://bugreport.java.com/bugreport/ +java.io.tmpdir=/tmp +oracle.dbtools.cmdline.ShellCommand=ords +java.version=22.0.2 +user.dir=/home/oracle +os.arch=amd64 +java.vm.specification.name=Java Virtual Machine Specification +jdbc.MaxLimit=100 +oracle.dbtools.cmdline.home=/opt/oracle/ords +native.encoding=UTF-8 +java.library.path=/usr/java/packages/lib:/usr/lib64:/lib64:/lib:/usr/lib +java.vendor=Oracle Corporation +java.vm.info=mixed mode, sharing +stderr.encoding=UTF-8 +java.vm.version=22.0.2+9-70 +sun.io.unicode.encoding=UnicodeLittle +jdbc.InitialLimit=50 +db.connectionType=customurl +java.class.version=66.0 +db.customURL=jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +standalone.access.log=/home/oracle + +2024-08-20T07:22:09.268Z INFO + +Mapped local pools from /etc/ords/config/databases: + /ords/ => default => VALID + + +2024-08-20T07:22:09.414Z INFO Oracle REST Data Services initialized +Oracle REST Data Services version : 23.4.0.r3461619 +Oracle REST Data Services server info: jetty/10.0.18 +Oracle REST Data Services java info: Java HotSpot(TM) 64-Bit Server VM 22.0.2+9-70 + diff --git a/docs/multitenant/usecase01/logfiles/openssl_execution.log b/docs/multitenant/usecase01/logfiles/openssl_execution.log index 30a1c5d4..e3915a21 100644 --- a/docs/multitenant/usecase01/logfiles/openssl_execution.log +++ b/docs/multitenant/usecase01/logfiles/openssl_execution.log @@ -1,22 +1,19 @@ +CREATING TLS CERTIFICATES /usr/bin/openssl genrsa -out ca.key 2048 -Generating RSA private key, 2048 bit long modulus 
-......................................................................................................................................................................................+++ -...................................+++ -e is 65537 (0x10001) -/usr/bin/openssl req -new -x509 -days 365 -key ca.key -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=oracle Root CA" -out ca.crt -/usr/bin/openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=cdb-dev-ords" -out server.csr -Generating a 2048 bit RSA private key -...................................+++ -........................................+++ +Generating RSA private key, 2048 bit long modulus (2 primes) +......................+++++ +..................................................+++++ +e is 65537 (0x010001) +/usr/bin/openssl req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords.oracle-database-operator-system /CN=localhost Root CA " -out ca.crt +/usr/bin/openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords.oracle-database-operator-system /CN=localhost" -out server.csr +Generating a RSA private key +...........+++++ +...........................................+++++ writing new private key to 'tls.key' ----- -/usr/bin/echo "subjectAltName=DNS:cdb-dev-ords,DNS:www.example.com" > extfile.txt +/usr/bin/echo "subjectAltName=DNS:cdb-dev-ords.oracle-database-operator-system,DNS:www.example.com" > extfile.txt /usr/bin/openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt Signature ok -subject=/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=cdb-dev-ords +subject=C = US, ST = California, L = SanFrancisco, O = "oracle ", CN = "cdb-dev-ords.oracle-database-operator-system ", CN = localhost Getting CA Private Key -/usr/bin/kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n oracle-database-operator-system 
-secret/db-tls created -/usr/bin/kubectl create secret generic db-ca --from-file="ca.crt" -n oracle-database-operator-system -secret/db-ca created diff --git a/docs/multitenant/usecase01/logfiles/ordsconfig.log b/docs/multitenant/usecase01/logfiles/ordsconfig.log index ad5e7bab..b787b752 100644 --- a/docs/multitenant/usecase01/logfiles/ordsconfig.log +++ b/docs/multitenant/usecase01/logfiles/ordsconfig.log @@ -1,35 +1,39 @@ -: Release 22.3 Production on Tue Oct 11 12:51:50 2022 +ORDS: Release 23.4 Production on Tue Aug 20 07:48:44 2024 -Copyright (c) 2010, 2022, Oracle. +Copyright (c) 2010, 2024, Oracle. Configuration: /etc/ords/config/ Database pool: default -Setting Value Source ------------------------------------------ -------------------------------------- ----------- -database.api.enabled true Global -database.api.management.services.disabled false Global -db.cdb.adminUser C##DBAPI_CDB_ADMIN AS SYSDBA Pool -db.cdb.adminUser.password ****** Pool Wallet -db.connectionType basic Pool -db.hostname racnode1.testrac.com Pool -db.password ****** Pool Wallet -db.port 1521 Pool -db.serviceNameSuffix Pool -db.servicename TESTORDS Pool -db.username ORDS_PUBLIC_USER Pool -error.externalPath /opt/oracle/ords/error Global -jdbc.InitialLimit 50 Pool -jdbc.MaxLimit 100 Pool -misc.pagination.maxRows 1000 Pool -plsql.gateway.mode proxied Pool -restEnabledSql.active true Pool -security.requestValidationFunction wwv_flow_epg_include_modules.authorize Pool -security.verifySSL true Global -standalone.access.log /home/oracle Global -standalone.https.cert /opt/oracle/ords//secrets/tls.crt Global -standalone.https.cert.key /opt/oracle/ords//secrets/tls.key Global -standalone.https.port 8888 Global +Setting Value Source +----------------------------------------- -------------------------------------------------- ----------- +database.api.enabled true Global +database.api.management.services.disabled false Global +db.cdb.adminUser C##DBAPI_CDB_ADMIN AS SYSDBA Pool 
+db.cdb.adminUser.password ****** Pool Wallet +db.connectionType customurl Pool +db.customURL jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90 Pool + )(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNEC + T_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL= + TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONL + Y))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST= + scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNEC + T_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +db.password ****** Pool Wallet +db.serviceNameSuffix Pool +db.username ORDS_PUBLIC_USER Pool +error.externalPath /opt/oracle/ords/error Global +jdbc.InitialLimit 50 Pool +jdbc.MaxLimit 100 Pool +misc.pagination.maxRows 1000 Pool +plsql.gateway.mode disabled Pool +restEnabledSql.active true Pool +security.requestValidationFunction ords_util.authorize_plsql_gateway Pool +security.verifySSL true Global +standalone.access.log /home/oracle Global +standalone.https.cert /opt/oracle/ords//secrets/tls.crt Global +standalone.https.cert.key /opt/oracle/ords//secrets/tls.key Global +standalone.https.port 8888 Global diff --git a/docs/multitenant/usecase01/logfiles/tagandpush.log b/docs/multitenant/usecase01/logfiles/tagandpush.log new file mode 100644 index 00000000..232d5bb2 --- /dev/null +++ b/docs/multitenant/usecase01/logfiles/tagandpush.log @@ -0,0 +1,14 @@ +/usr/bin/docker tag oracle/ords-dboper:latest [.......]/ords-dboper:latest + +/usr/bin/docker push [your container registry]/ords-dboper:latest +The push refers to repository [your container registry] +0405aac3af1c: Pushed +6be46e8e1e21: Pushed +c9884830a66d: Pushed +a46244557bb9: Pushing [===========================> ] 261.8MB/469.9MB +f988845e261e: Pushed +fe07ec0b1f5a: Layer already exists +2ac63de5f950: Layer already exists +386cd7a64c01: Layer already exists +826c69252b8b: Layer already exists + diff --git a/docs/multitenant/usecase01/logfiles/testapi.log b/docs/multitenant/usecase01/logfiles/testapi.log index 4c95b457..cb42ecc3 100644 --- 
a/docs/multitenant/usecase01/logfiles/testapi.log +++ b/docs/multitenant/usecase01/logfiles/testapi.log @@ -1,6 +1,7 @@ -* Trying 127.0.0.1... +kubectl exec -it `kubectl get pods -n oracle-database-operator-system|grep ords|cut -d ' ' -f 1` -n oracle-database-operator-system -i -t -- /usr/bin/curl -sSkv -k -X GET https://localhost:8888/ords/_/db-api/stable/metadata-catalog/ +* Trying ::1... * TCP_NODELAY set -* Connected to localhost (127.0.0.1) port 8888 (#0) +* Connected to localhost (::1) port 8888 (#0) * ALPN, offering h2 * ALPN, offering http/1.1 * successfully set certificate verify locations: @@ -19,10 +20,10 @@ * SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 * ALPN, server accepted to use h2 * Server certificate: -* subject: C=CN; ST=GD; L=SZ; O=oracle, Inc.; CN=cdb-dev-ords -* start date: Oct 11 07:44:38 2022 GMT -* expire date: Oct 11 07:44:38 2023 GMT -* issuer: C=CN; ST=GD; L=SZ; O=oracle, Inc.; CN=oracle Root CA +* subject: C=US; ST=California; L=SanFrancisco; O=oracle ; CN=cdb-dev-ords.oracle-database-operator-system ; CN=localhost +* start date: Aug 20 07:14:04 2024 GMT +* expire date: Aug 20 07:14:04 2025 GMT +* issuer: C=US; ST=California; L=SanFrancisco; O=oracle ; CN=cdb-dev-ords.oracle-database-operator-system ; CN=localhost Root CA * SSL certificate verify result: unable to get local issuer certificate (20), continuing anyway. 
* Using HTTP2, server supports multi-use * Connection state changed (HTTP/2 confirmed) @@ -30,7 +31,7 @@ * TLSv1.3 (OUT), TLS app data, [no content] (0): * TLSv1.3 (OUT), TLS app data, [no content] (0): * TLSv1.3 (OUT), TLS app data, [no content] (0): -* Using Stream ID: 1 (easy handle 0x564be7b0b970) +* Using Stream ID: 1 (easy handle 0x55d14a7dea90) * TLSv1.3 (OUT), TLS app data, [no content] (0): > GET /ords/_/db-api/stable/metadata-catalog/ HTTP/2 > Host: localhost:8888 @@ -46,4 +47,16 @@ * TLSv1.3 (OUT), TLS app data, [no content] (0): * TLSv1.3 (IN), TLS app data, [no content] (0): * TLSv1.3 (IN), TLS app data, [no content] (0): - +< HTTP/2 200 +< content-type: application/json +< +* TLSv1.3 (IN), TLS handshake, [no content] (0): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* TLSv1.3 (IN), TLS app data, [no content] (0): +* Connection #0 to host localhost left intact +{"items":[{"name":"default","links":[{"rel":"canonical","href":"https://localhost:8888/ords/_/db-api/stable/metadata-catalog/openapi.json","mediaType":"application/vnd.oai.openapi+json;version=3.0"}]}],"links":[{"rel":"self","href":"https://localhost:8888/ords/_/db-api/stable/metadata-catalog/"},{"rel":"describes","href":"https://localhost:8888/ords/_/db-api/stable/"}]} diff --git a/docs/multitenant/usecase01/makefile b/docs/multitenant/usecase01/makefile index e468ef62..d4176c75 100644 --- a/docs/multitenant/usecase01/makefile +++ b/docs/multitenant/usecase01/makefile @@ -109,6 +109,8 @@ # | Before testing step13 delete the crd: | # | kubectl delete pdb pdb1 -n oracle-database-operator-system | # +---------------------------------------------------------------------------+ +# |step14 | delete pdb | +# 
+-----------------------------+---------------------------------------------+ # | DIAGNOSTIC TARGETS | # +-----------------------------+---------------------------------------------+ # | dump | Dump pods info into a file | @@ -137,6 +139,7 @@ ORDSPORT=8888 MAKE=/usr/bin/make DOCKERFILE=../../../ords/Dockerfile RUNSCRIPT=../../../ords/runOrdsSSL.sh +ORDSIMGDIR=../../../ords RM=/usr/bin/rm CP=/usr/bin/cp ECHO=/usr/bin/echo @@ -170,26 +173,13 @@ step10: pdb step11: close step12: open step13: map +step14: delete checkstep9: checkcdb createimage: - $(CP) $(DOCKERFILE) . - $(CP) $(RUNSCRIPT) . - @echo "BUILDING CDB IMAGES" - @if [[ ! -f ./Dockerfile ]]; \ - then\ - echo "DOCKERFILE DOES NOT EXISTS";\ - exit 1; \ - fi; - @if [[ ! -f ./runOrdsSSL.sh ]]; \ - then\ - echo "DOCKERFILE DOES NOT EXISTS";\ - exit 1; \ - fi; - $(DOCKER) build -t $(IMAGE) . - $(RM) ./Dockerfile ./runOrdsSSL.sh + $(DOCKER) build -t $(IMAGE) $(ORDSIMGDIR) tagimage: @echo "TAG IMAGE" @@ -217,9 +207,9 @@ dboperator: tlscert: @echo "CREATING TLS CERTIFICATES" $(OPENSSL) genrsa -out ca.key 2048 - $(OPENSSL) req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER) /CN=$(LOCALHOST) Root CA " -out ca.crt - $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER) /CN=$(LOCALHOST)" -out server.csr - $(ECHO) "subjectAltName=DNS:cdb-dev-$(REST_SERVER),DNS:www.example.com" > extfile.txt + $(OPENSSL) req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER).$(NAMESPACE) /CN=$(LOCALHOST) Root CA " -out ca.crt + $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER).$(NAMESPACE) /CN=$(LOCALHOST)" -out server.csr + $(ECHO) "subjectAltName=DNS:$(RESTPREFIX)-$(REST_SERVER).$(NAMESPACE),DNS:www.example.com" > 
extfile.txt $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out $(SCRT) tlssecret: @@ -255,6 +245,9 @@ map: checkpdb: $(KUBECTL) get pdbs -n $(NAMESPACE) +delete: + $(KUBECTL) apply -f pdb_delete.yaml + dump: @$(eval TMPSP := $(shell date "+%y%m%d%H%M%S" )) @$(eval DIAGFILE := ./opdmp.$(TMPSP)) diff --git a/docs/multitenant/usecase01/pdb_create.yaml b/docs/multitenant/usecase01/pdb_create.yaml index 7953118f..be3581ad 100644 --- a/docs/multitenant/usecase01/pdb_create.yaml +++ b/docs/multitenant/usecase01/pdb_create.yaml @@ -43,4 +43,5 @@ spec: totalSize: "1G" tempSize: "100M" action: "Create" + assertivePdbDeletion: true diff --git a/docs/multitenant/usecase01/pdb_delete.yaml b/docs/multitenant/usecase01/pdb_delete.yaml new file mode 100644 index 00000000..c22b546a --- /dev/null +++ b/docs/multitenant/usecase01/pdb_delete.yaml @@ -0,0 +1,34 @@ +apiVersion: database.oracle.com/v1alpha1 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "pdb1-secret" + key: "webserver_user" + webServerPwd: + secret: + secretName: "pdb1-secret" + key: "webserver_pwd" + diff --git a/docs/multitenant/usecase01/pdb_map.yaml b/docs/multitenant/usecase01/pdb_map.yaml index cd6d4ffb..3300a7fa 100644 --- a/docs/multitenant/usecase01/pdb_map.yaml +++ b/docs/multitenant/usecase01/pdb_map.yaml @@ -42,3 +42,4 @@ spec: totalSize: "1G" tempSize: "100M" action: "Map" + assertivePdbDeletion: true diff --git a/docs/multitenant/usecase01/server.csr b/docs/multitenant/usecase01/server.csr new file mode 100644 index 
00000000..e308d301 --- /dev/null +++ b/docs/multitenant/usecase01/server.csr @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIC3TCCAcUCAQAwgZcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh +MRUwEwYDVQQHDAxTYW5GcmFuY2lzY28xEDAOBgNVBAoMB29yYWNsZSAxNjA0BgNV +BAMMLWNkYi1kZXYtb3Jkcy5vcmFjbGUtZGF0YWJhc2Utb3BlcmF0b3Itc3lzdGVt +IDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAm9nlNSQNsPTVqH57MkWKZEyaVtzVKQ8Z3oDK6hWXfB24p0jVj6sTOJkf +NVAxnqmU8DpW3odpbU6qWe/n+B5vJpqdXUGdsq9NKyus2fGb/xf1UnskpA2FUuWZ +o3upyCFxDAOvE4eZUzlxIn+54XXaNAdQiU9E8VXPr5YxrvZ15T/xCXLtJPs/RCOF +cJ8+gvZGcjMbdP16auJDVWZzBaur3eKbiHN7LXNCCRzGO++dv0kGY8vH7MyFfgp3 +qYBiSHS3WDiFUJjYIvfa8lLfP1hnlCyHn8TnU9gjGjmd1YcccSKqWIAT24wPUKVU +Lme4n91jxDPp7g8nRtDw0Smj9gYCtQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEB +AGOG/9IJJRvT2JLcuzE5Arai1XHc6Jh65iuDRqXQav47Bz38FFF2gZNO69gzDmhq +6k7tie+5bPcAHuuJZ0dAa71a9SLjKl+XNkkI0vS6te6OK3DCVUoMqNCk5VdwrJw0 +RORbKUwgLEG6mu80Gc/6wCdeR/36hoYTMeNPjm6M9e+X5ppsXqxCNsgDxasJFT82 +FejuJE2sZ6RCradlDToUHNS1dMLoW0WAIISqOmrDvEI6snm9ZZr3Sxo1auEtpI6v +NllBM4AgEghy/2mAtke+By4WHCfXBpxEGv9S7ATqJHYrR5Qa3nwx0eojWW1vmn0/ +aEzslX1tAH6oz2jA6QZ0sNo= +-----END CERTIFICATE REQUEST----- diff --git a/docs/multitenant/usecase01/tls.crt b/docs/multitenant/usecase01/tls.crt new file mode 100644 index 00000000..6bf8aef4 --- /dev/null +++ b/docs/multitenant/usecase01/tls.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEFDCCAvygAwIBAgIUd9l6tMS21ak3e4S0VdPhY0jG3gQwDQYJKoZIhvcNAQEL +BQAwgaExCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRUwEwYDVQQH +DAxTYW5GcmFuY2lzY28xEDAOBgNVBAoMB29yYWNsZSAxNjA0BgNVBAMMLWNkYi1k +ZXYtb3Jkcy5vcmFjbGUtZGF0YWJhc2Utb3BlcmF0b3Itc3lzdGVtIDEcMBoGA1UE +AwwTbG9jYWxob3N0ICBSb290IENBIDAeFw0yNDA4MTIxNTMyMzVaFw0yNTA4MTIx +NTMyMzVaMIGXMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMG +A1UEBwwMU2FuRnJhbmNpc2NvMRAwDgYDVQQKDAdvcmFjbGUgMTYwNAYDVQQDDC1j +ZGItZGV2LW9yZHMub3JhY2xlLWRhdGFiYXNlLW9wZXJhdG9yLXN5c3RlbSAxEjAQ 
+BgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AJvZ5TUkDbD01ah+ezJFimRMmlbc1SkPGd6AyuoVl3wduKdI1Y+rEziZHzVQMZ6p +lPA6Vt6HaW1Oqlnv5/gebyaanV1BnbKvTSsrrNnxm/8X9VJ7JKQNhVLlmaN7qcgh +cQwDrxOHmVM5cSJ/ueF12jQHUIlPRPFVz6+WMa72deU/8Qly7ST7P0QjhXCfPoL2 +RnIzG3T9emriQ1VmcwWrq93im4hzey1zQgkcxjvvnb9JBmPLx+zMhX4Kd6mAYkh0 +t1g4hVCY2CL32vJS3z9YZ5Qsh5/E51PYIxo5ndWHHHEiqliAE9uMD1ClVC5nuJ/d +Y8Qz6e4PJ0bQ8NEpo/YGArUCAwEAAaNMMEowSAYDVR0RBEEwP4IsY2RiLWRldi1v +cmRzLm9yYWNsZS1kYXRhYmFzZS1vcGVyYXRvci1zeXN0ZW2CD3d3dy5leGFtcGxl +LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAh7Lsu2ITS6Bc2q/Ef4No5Us0Vo9BWKoL +AlrfQPjsv1erMGsyEEyZ0Cg8l3QrXlscQ1ESvx0BnRGjoqZGE4+PoVZTEYSkokXP +aAr69epPzXQRyyAGCg5GeL6IFAj1AzqJGNnKOrPaLpcTri4MboiWmW+MHmgLdyPK +iwl8bNa8841nK/L/m6QET15BI+MIAvn7pgcpztum5jmkB+eceXzXnKUGg77TaFiX +bXqVBR4EvexC4DgUfQJI4zJLFdcH/GHxCpaaXNjbXeVz1ZK/qo2TCrXp2UXVrznU +9VTUuCaQA2VYZCitvAbupt+1OvMFYhWiIAroJSmzrvH4oK+IXgY6GA== +-----END CERTIFICATE----- diff --git a/docs/multitenant/usecase01/tls.key b/docs/multitenant/usecase01/tls.key new file mode 100644 index 00000000..666c5639 --- /dev/null +++ b/docs/multitenant/usecase01/tls.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCb2eU1JA2w9NWo +fnsyRYpkTJpW3NUpDxnegMrqFZd8HbinSNWPqxM4mR81UDGeqZTwOlbeh2ltTqpZ +7+f4Hm8mmp1dQZ2yr00rK6zZ8Zv/F/VSeySkDYVS5Zmje6nIIXEMA68Th5lTOXEi +f7nhddo0B1CJT0TxVc+vljGu9nXlP/EJcu0k+z9EI4Vwnz6C9kZyMxt0/Xpq4kNV +ZnMFq6vd4puIc3stc0IJHMY7752/SQZjy8fszIV+CnepgGJIdLdYOIVQmNgi99ry +Ut8/WGeULIefxOdT2CMaOZ3VhxxxIqpYgBPbjA9QpVQuZ7if3WPEM+nuDydG0PDR +KaP2BgK1AgMBAAECggEAKUwl1l0FW7yk2Q8a6glPUKCTzSybN1QPEMyj+D9ccsEV +aw57uKQmZbr9cA0d+OMK2lU7K6BKKXLM5SQTHcZCwcH6rPl0JiMZmbTrCp1hLslU +clS7MtV6XKsGeTGNncBuyjY3sD8gO9NezTt3L+0gsuS1TI06wZBxhh+QbsJUHzjW +bC3mNjD4SqXree4Snp05nlFaT2s2isIjj25mKDwBu8IX0BN2VjsaSiQcjb8Dmzmu +42Xh7bcWBebns8Ehuq9TIl6ZjQht+pmVOMlB862baVpW/9CxkknzM+UQhIkXTSJk +Jt/mGeO89V4/Zh2N4ixIOE1hw87EvRFBoYh2VF58QQKBgQDMujXYblh+eEdsB1LG 
+kY0LerFHuQgdzifYmjPl0jtBsWDmh5i6q9PRUs2JZ/Fsq4QMQ8SLinGzaIBq5FKr +CL067X5blrFA9H0D6exJI3iHBTQpeMFwtqvu3j+zpCmgzonaUDQrczUpc0hxU7YI +/jhDe9LSWknPrzzMoWWKuy0sTQKBgQDC4g8F2krqm9Q5ug8bRKTAvMrY0skFIwrP +5LXBq9C8YCnLnT4S4tYQfbnWaBeG7YpkkmkZe30c9MUjsr1OHZbo+jlxHBU+oRYZ +e1j0UorVGt7FfNe/zjW0fLd72CBO741EDvV6pVeItkAwH6P5/cbRu085dwvyFbxv +JmOaYddECQKBgQCuid6YG1NE10SE3CV89uAZtktny18ZEgY0ixrNx5MPaaskPtw9 +4Xofjol+qOhR7lQQpMHu+WQAQYqiFvBHspapo4pDiVCrAQWIDamNnTkHW69h3/qD +HqmsZzxF6iI3X351akVf+cOMCCXtwCGEvz+2gN12ytT8w/iAuOS6BuP3TQKBgBlf +v57+diSn13EQtajSPjVOH4ctorjFgEHjQHsP+OSeDLMTLSLeYArTo9+zu+R4hz1j +BsYnmvmrMQPd4OIL3jtFYTdF9coqxSraMZHWMXdfwUOrZpf1rG5skqNQV5yPejAz +Vmj6oDQPrrnVVM9W6I0kO0N7KZYCmH9MW0mdlZ6pAoGAB60f2sk35VUBpvh7qzTY +70WDbNnCCU3I3KZ7LCUwUPWzGLQwMXRlAb5ZMheT/SGPChX4QXCNUCjXkR3Am3NO +yURHqZIRy0bwZRVjYnlCtc9YQ8pB0isZ1z2a9FXRD75o2WboFZ+VsG0FU81IE2ZO +gW802gT76NRnz851B7/nFNs= +-----END PRIVATE KEY----- diff --git a/docs/multitenant/usecase02/README.md b/docs/multitenant/usecase02/README.md index 0b060df7..c434271f 100644 --- a/docs/multitenant/usecase02/README.md +++ b/docs/multitenant/usecase02/README.md @@ -13,7 +13,7 @@ > ☞ The examples of this folder are based on single namespace **oracle-database-operator-system** -This page explains how to plug and unplug database a pdb; it assumes that you have already configured a pluggable database (see usecase01) +This page explains how to plug and unplug database a pdb; it assumes that you have already configured a pluggable database (see [usecase01](../usecase01/README.md)) The following table reports the parameters required to configure and use oracle multi tenant controller for pluggable database lifecycle management. | yaml file parameters | value | description /ords parameter | @@ -78,7 +78,7 @@ The following table reports the parameters required to configure and use oracle ### UNPLUG DATABASE -Use the following command to check kubernets pdb resources. 
Note that the output of the commands can be tailored to fit to your needs. Just check the structure of pdb resource **kubectl get pdbs -n oracle-database-operator-system -o=json** and modify the script accordingly. For the sake of simplicity put this command in a single script **checkpdbs.sh**. +Use the following command to check kubernetes pdb resources. Note that the output of the commands can be tailored to meet your needs. Just check the structure of pdb resource **kubectl get pdbs -n oracle-database-operator-system -o=json** and modify the script accordingly. For the sake of simplicity put this command in a single script **checkpdbs.sh**. ```bash kubectl get pdbs -n oracle-database-operator-system -o=jsonpath='{range .items[*]} @@ -92,7 +92,7 @@ kubectl get pdbs -n oracle-database-operator-system -o=jsonpath='{range .items[* {"\n"}{end}' ``` -We assume that the pluggable database pdbdev is already configured on opened in read write mode +We assume that the pluggable database pdbdev is already configured and opened in read write mode ```bash ./checkpdbs.sh @@ -106,7 +106,7 @@ MSG=Success ``` -Prepare a new yaml file **pdb_unplug.yaml** to unplug the pdbdev database. Make sure that the path of the xml file is correct and check the existence of all the required secrets. +Prepare a new yaml file **pdb_unplug.yaml** to unplug the pdbdev database. Make sure that the path of the xml file is correct and check the existence of all the required secrets. Do not reuse an existing xml file. ```yaml # Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. @@ -248,7 +248,7 @@ Completed: DROP PLUGGABLE DATABASE "pdbdev" KEEP DATAFILES ``` -login to the server and check xml file existence. Get the datafile path on the ASM filesystem. +login to the server and check xml file existence. Verify the datafile path on the ASM filesystem. 
```bash ls -ltr /tmp/pdbunplug.xml diff --git a/docs/multitenant/usecase02/pdb_clone.yaml b/docs/multitenant/usecase02/pdb_clone.yaml index be22020b..0ecc3c70 100644 --- a/docs/multitenant/usecase02/pdb_clone.yaml +++ b/docs/multitenant/usecase02/pdb_clone.yaml @@ -18,6 +18,7 @@ spec: fileNameConversions: "NONE" totalSize: "UNLIMITED" tempSize: "UNLIMITED" + assertivePdbDeletion: true adminName: secret: secretName: "pdb1-secret" diff --git a/docs/multitenant/usecase02/pdb_plug.yaml b/docs/multitenant/usecase02/pdb_plug.yaml index 0d0d3b37..77c00b9c 100644 --- a/docs/multitenant/usecase02/pdb_plug.yaml +++ b/docs/multitenant/usecase02/pdb_plug.yaml @@ -21,6 +21,7 @@ spec: totalSize: "1G" tempSize: "100M" action: "Plug" + assertivePdbDeletion: true pdbTlsKey: secret: secretName: "db-tls" diff --git a/docs/multitenant/usecase03/Dockerfile b/docs/multitenant/usecase03/Dockerfile index 5c27f11b..772a7e6d 100644 --- a/docs/multitenant/usecase03/Dockerfile +++ b/docs/multitenant/usecase03/Dockerfile @@ -1,34 +1,63 @@ -#LICENSE UPL 1.0 -# -# Copyright (c) 1982-2017 Oracle and/or its affiliates. All rights reserved. -# -# ORACLE DOCKERFILES PROJECT -# -------------------------- -# This is the Dockerfile for Oracle Rest Data Services 22.2 -# +## Copyright (c) 2022 Oracle and/or its affiliates. 
+## +## The Universal Permissive License (UPL), Version 1.0 +## +## Subject to the condition set forth below, permission is hereby granted to any +## person obtaining a copy of this software, associated documentation and/or data +## (collectively the "Software"), free of charge and under any and all copyright +## rights in the Software, and any and all patent rights owned or freely +## licensable by each licensor hereunder covering either (i) the unmodified +## Software as contributed to or provided by such licensor, or (ii) the Larger +## Works (as defined below), to deal in both +## +## (a) the Software, and +## (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +## one is included with the Software (each a "Larger Work" to which the Software +## is contributed by such licensors), +## +## without restriction, including without limitation the rights to copy, create +## derivative works of, display, perform, and distribute the Software and make, +## use, sell, offer for sale, import, export, have made, and have sold the +## Software and the Larger Work(s), and to sublicense the foregoing rights on +## either these or other terms. +## +## This license is subject to the following condition: +## The above copyright notice and either this complete permission notice or at +## a minimum a reference to the UPL must be included in all copies or +## substantial portions of the Software. +## +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +## SOFTWARE. 
+ FROM container-registry.oracle.com/java/jdk:latest # Environment variables required for this build (do NOT change) # ------------------------------------------------------------- ENV ORDS_HOME=/opt/oracle/ords/ \ - RUN_FILE="runOrdsSSL.sh" - -#RUN_FILE_NOSSL="runOrdsNOSSL.sh" + RUN_FILE="runOrdsSSL.sh" \ + ORDSVERSION=23.4.0-8 # Copy binaries # ------------- COPY $RUN_FILE $ORDS_HOME -#COPY $RUN_FILE_NOSSL $ORDS_HOME -RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps && \ +RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps curl lsof && \ yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && \ yum -y install java-11-openjdk-devel && \ - yum -y install ords && \ yum -y install iproute && \ yum clean all +RUN curl -o /tmp/ords-$ORDSVERSION.el8.noarch.rpm https://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64/getPackage/ords-$ORDSVERSION.el8.noarch.rpm + +RUN rpm -ivh /tmp/ords-$ORDSVERSION.el8.noarch.rpm + # Setup filesystem and oracle user -# ------------------------------------------------------------ +# -------------------------------- RUN mkdir -p $ORDS_HOME/doc_root && \ mkdir -p $ORDS_HOME/error && \ mkdir -p $ORDS_HOME/secrets && \ @@ -43,11 +72,9 @@ RUN mkdir -p $ORDS_HOME/doc_root && \ USER oracle WORKDIR /home/oracle -#VOLUME ["$ORDS_HOME/config/ords"] +VOLUME ["$ORDS_HOME/config/ords"] EXPOSE 8888 # Define default command to start Ords Services CMD $ORDS_HOME/$RUN_FILE -## ONLY FOR DEVELOPMENT STAGE -#CMD ["/usr/sbin/init"] diff --git a/docs/multitenant/usecase03/README.md b/docs/multitenant/usecase03/README.md index 3703a9ff..c06368cd 100644 --- a/docs/multitenant/usecase03/README.md +++ b/docs/multitenant/usecase03/README.md @@ -19,9 +19,9 @@ ### INTRODUCTION -> ☞ This folder contains the yaml files required to configure and manage cdb and pdb 
in different namespaces. The main change here is the possibility to specify the namespace where CDB will be created, this implies the introduction of a new parameter at PDB level in order to specify the CDB namespace. +> ☞ This folder contains the yaml files required to configure and manage cdb and pdb in different namespaces. The main change here is the possibility to specify the namespace where CDB will be created, this implies the introduction of a new parameter at PDB level in order to specify the CDB namespace. -Tasks performed in the usecase03 are the same ones of the other usecases with the exception that controller pods cdb pods and pdb crd are running in different namespaces. You must be aware of the fact that secrets must be created in the proper namespaces; cdb secrets go into cdb namespace , pdb secrets go into pdbnamespace while certificate secrets need to be created in every namespace. +Tasks performed in the usecase03 are the same ones of usecase01 with the exception that controller pods cdb pods and pdb crd are running in different namespaces. You must be aware of the fact that secrets must be created in the proper namespaces; cdb secrets go into cdb namespace , pdb secrets go into pdbnamespace while certificate secrets need to be created in every namespace. 
| yaml file parameters | value | description /ords parameter | @@ -53,6 +53,7 @@ Tasks performed in the usecase03 are the same ones of the other usecases with th | tdeExport | | [tdeExport] | | tdeSecret | | [tdeSecret][tdeSecret] | | tdePassword | | [tdeSecret][tdeSecret] | +| assertivePdbDeletion | boolean | [turn on imperative approach on crd deletion][imperative] | ![generla schema](./NamespaceSegregation.png) @@ -261,6 +262,7 @@ In order to facilitate the command execution use the [makefile](./makefile) avai |reloadop | Reload the db operator | |login | Login into cdb pod | - +[imperative]:https://kubernetes.io/docs/concepts/overview/working-with-objects/object-management/ + diff --git a/docs/multitenant/usecase03/cdb_create.yaml b/docs/multitenant/usecase03/cdb_create.yaml index 09dd7f86..d3b5e04f 100644 --- a/docs/multitenant/usecase03/cdb_create.yaml +++ b/docs/multitenant/usecase03/cdb_create.yaml @@ -5,9 +5,9 @@ metadata: namespace: cdbnamespace spec: cdbName: "DB12" - ordsImage: "lin.ocir.io/intsanjaysingh/mmalvezz/testppr/ords-dboper:latest" + ordsImage: ".............your registry............./ords-dboper:latest" ordsImagePullPolicy: "Always" - dbTnsurl : "(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + dbTnsurl : "...Container tns alias....." 
replicas: 1 sysAdminPwd: secret: diff --git a/docs/multitenant/usecase03/makefile b/docs/multitenant/usecase03/makefile index fc95cfa0..7270a5e0 100644 --- a/docs/multitenant/usecase03/makefile +++ b/docs/multitenant/usecase03/makefile @@ -123,8 +123,10 @@ URLPATH=/_/db-api/stable/database/pdbs/ OPENSSL=/usr/bin/openssl ORDSPORT=8888 MAKE=/usr/bin/make -DOCKERFILE=Dockerfile +DOCKERFILE=../../../ords/Dockerfile +RUNSCRIPT=../../../ords/runOrdsSSL.sh RM=/usr/bin/rm +CP=/bin/cp ECHO=/usr/bin/echo CERTMANAGER=https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml CDB_SECRET_YAML=cdb_secret.yaml @@ -161,16 +163,8 @@ checkstep9: checkcdb createimage: @echo "BUILDING CDB IMAGES" - @if [[ ! -f ./Dockerfile ]]; \ - then\ - echo "DOCKERFILE DOES NOT EXISTS";\ - exit 1; \ - fi; - @if [[ ! -f ../runOrdsSSL.sh ]]; \ - then\ - echo "DOCKERFILE DOES NOT EXISTS";\ - exit 1; \ - fi; + $(CP) $(DOCKERFILE) . + $(CP) $(RUNSCRIPT) . $(DOCKER) build -t $(IMAGE) . tagimage: @@ -201,7 +195,7 @@ tlscert: $(OPENSSL) genrsa -out ca.key 2048 $(OPENSSL) req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER).$(CDB_NAMESPACE) /CN=$(LOCALHOST) Root CA " -out ca.crt $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj "/C=US/ST=California/L=SanFrancisco/O=$(COMPANY) /CN=$(RESTPREFIX)-$(REST_SERVER).$(CDB_NAMESPACE) /CN=$(LOCALHOST)" -out server.csr - $(ECHO) "subjectAltName=DNS:cdb-dev-$(REST_SERVER),DNS:www.example.com" > extfile.txt + $(ECHO) "subjectAltName=DNS:$(RESTPREFIX)-$(REST_SERVER).$(CDB_NAMESPACE),DNS:www.example.com" > extfile.txt $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out $(SCRT) diff --git a/docs/multitenant/usecase03/runOrdsSSL.sh b/docs/multitenant/usecase03/runOrdsSSL.sh new file mode 100644 index 00000000..35f1b77b --- /dev/null +++ b/docs/multitenant/usecase03/runOrdsSSL.sh @@ -0,0 +1,190 @@ 
+#!/bin/bash + +cat <$TNSNAME + + +function SetParameter() { + ##ords config info <--- Use this command to get the list + +[[ ! -z "${ORACLE_HOST}" && -z "${DBTNSURL}" ]] && { + $ORDS --config ${CONFIG} config set db.hostname ${ORACLE_HOST:-racnode1} + $ORDS --config ${CONFIG} config set db.port ${ORACLE_PORT:-1521} + $ORDS --config ${CONFIG} config set db.servicename ${ORACLE_SERVICE:-TESTORDS} +} + +[[ -z "${ORACLE_HOST}" && ! -z "${DBTNSURL}" ]] && { + #$ORDS --config ${CONFIG} config set db.tnsAliasName ${TNSALIAS} + #$ORDS --config ${CONFIG} config set db.tnsDirectory ${TNS_ADMIN} + #$ORDS --config ${CONFIG} config set db.connectionType tns + + $ORDS --config ${CONFIG} config set db.connectionType customurl + $ORDS --config ${CONFIG} config set db.customURL jdbc:oracle:thin:@${DBTNSURL} +} + + $ORDS --config ${CONFIG} config set security.requestValidationFunction false + $ORDS --config ${CONFIG} config set jdbc.MaxLimit 100 + $ORDS --config ${CONFIG} config set jdbc.InitialLimit 50 + $ORDS --config ${CONFIG} config set error.externalPath ${ERRORFOLDER} + $ORDS --config ${CONFIG} config set standalone.access.log /home/oracle + $ORDS --config ${CONFIG} config set standalone.https.port 8888 + $ORDS --config ${CONFIG} config set standalone.https.cert ${CERTIFICATE} + $ORDS --config ${CONFIG} config set standalone.https.cert.key ${KEY} + $ORDS --config ${CONFIG} config set restEnabledSql.active true + $ORDS --config ${CONFIG} config set security.verifySSL true + $ORDS --config ${CONFIG} config set database.api.enabled true + $ORDS --config ${CONFIG} config set plsql.gateway.mode disabled + $ORDS --config ${CONFIG} config set database.api.management.services.disabled false + $ORDS --config ${CONFIG} config set misc.pagination.maxRows 1000 + $ORDS --config ${CONFIG} config set db.cdb.adminUser "${CDBADMIN_USER:-C##DBAPI_CDB_ADMIN} AS SYSDBA" + $ORDS --config ${CONFIG} config secret --password-stdin db.cdb.adminUser.password << EOF +${CDBADMIN_PWD:-PROVIDE_A_PASSWORD} 
+EOF + +$ORDS --config ${CONFIG} config user add --password-stdin ${WEBSERVER_USER:-ordspdbadmin} "SQL Administrator, System Administrator" <${CKF} 2>&1 +echo "checkfile" >> ${CKF} +NOT_INSTALLED=`cat ${CKF} | grep "INFO: The" |wc -l ` +echo NOT_INSTALLED=$NOT_INSTALLED + + +function StartUp () { + $ORDS --config $CONFIG serve --port 8888 --secure +} + +# Check whether ords is already setup +if [ $NOT_INSTALLED -ne 0 ] +then + echo " SETUP " + setupOrds; + StartUp; +fi + +if [ $NOT_INSTALLED -eq 0 ] +then + echo " STARTUP " + StartUp; +fi + + diff --git a/docs/sharding/README.md b/docs/sharding/README.md index 3d3320ee..0c817467 100644 --- a/docs/sharding/README.md +++ b/docs/sharding/README.md @@ -22,18 +22,21 @@ The Sharding Database controller in Oracle Database Operator deploys Oracle Shar The Oracle Sharding database controller provides end-to-end automation of Oracle Database sharding topology deployment in Kubernetes clusters. -## Using Oracle Sharding Database Operator +## Using Oracle Database Operator Sharding Controller -To create a Sharding Topology, complete the steps in the following sections below: +Following sections provide the details for deploying Oracle Globally Distributed Database (Oracle Sharded Database) using Oracle Database Operator Sharding Controller with different use cases: -1. [Prerequisites for running Oracle Sharding Database Controller](#prerequisites-for-running-oracle-sharding-database-controller) -2. [Provisioning Sharding Topology in a Cloud based Kubernetes Cluster (OKE in this case)](#provisioning-sharding-topology-in-a-cloud-based-kubernetes-cluster-oke-in-this-case) -3. [Connecting to Shard Databases](#connecting-to-shard-databases) -4. 
[Debugging and Troubleshooting](#debugging-and-troubleshooting) +* [Prerequisites for running Oracle Sharding Database Controller](#prerequisites-for-running-oracle-sharding-database-controller) +* [Oracle Database 23ai Free](#oracle-database-23ai-free) +* [Provisioning Sharding Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-sharding-topology-with-system-managed-sharding-in-a-cloud-based-kubernetes-cluster) +* [Provisioning Sharding Topology with User Defined Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-sharding-topology-with-user-defined-sharding-in-a-cloud-based-kubernetes-cluster) +* [Provisioning System-Managed Sharding Topology with Raft replication enabled in a Cloud-Based Kubernetes Cluster](#provisioning-system-managed-sharding-topology-with-raft-replication-enabled-in-a-cloud-based-kubernetes-cluster) +* [Connecting to Shard Databases](#connecting-to-shard-databases) +* [Debugging and Troubleshooting](#debugging-and-troubleshooting) **Note** Before proceeding to the next section, you must complete the instructions given in each section, based on your enviornment, before proceeding to next section. -## Prerequisites for Running Oracle Sharding Database Controller +## Prerequisites for running Oracle Sharding Database Controller **IMPORTANT:** You must make the changes specified in this section before you proceed to the next section. @@ -85,6 +88,8 @@ You can either download the images and push them to your Docker Images Repositor **Note**: In the sharding example yaml files, we are using GDS and database images available on [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). +**Note:** In case you want to use the `Oracle Database 23ai Free` Image for Database and GSM, refer to section [Oracle Database 23ai Free](#oracle-database-23ai-free) for more details. + ### 4. Create a namespace for the Oracle DB Sharding Setup Create a Kubernetes namespace named `shns`. 
All the resources belonging to the Oracle Database Sharding Setup will be provisioned in this namespace named `shns`. For example: @@ -101,7 +106,7 @@ You can either download the images and push them to your Docker Images Repositor Create a Kubernetes secret named `db-user-pass-rsa` using these steps: [Create Kubernetes Secret](./provisioning/create_kubernetes_secret_for_db_user.md) -After you have the above prerequsites completed, you can proceed to the next section for your environment to provision the Oracle Database Sharding Topology. +After you have the above prerequisites completed, you can proceed to the next section for your environment to provision the Oracle Database Sharding Topology. ### 6. Provisioning a Persistent Volume having an Oracle Database Gold Image @@ -111,19 +116,35 @@ In case of an `OCI OKE` cluster, you can use this Persistent Volume during provi You can refer [here](./provisioning/provisioning_persistent_volume_having_db_gold_image.md) for the steps involved. -## Provisioning Sharding Topology with System Sharding in a Cloud-Based Kubernetes Cluster +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. So, this step will not be needed if you are deploying Oracle Sharded Database using Oracle 23ai Free Database and GSM Images. + +## Oracle Database 23ai Free + +Please refer to [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) documentation for more details. + +If you want to use Oracle Database 23ai Free Image for Database and GSM for deployment of the Sharded Database using Sharding Controller in Oracle Database Kubernetes Operator, you need to consider the below points: + +* To deploy using the FREE Database and GSM Image, you will need to add the additional parameter `dbEdition: "free"` to the .yaml file. 
+* Refer to [Sample Sharded Database Deployment using Oracle 23ai FREE Database and GSM Images](./provisioning/free/sharding_provisioning_with_free_images.md) for an example. +* For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. +* Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +* Total number of chunks for FREE Database defaults to `12` if `CATALOG_CHUNKS` parameter is not specified. This default value is determined considering limitation of 12 GB of user data on disk for oracle free database. + + +## Provisioning Sharding Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster -Deploy Oracle Database Sharding Topology with `System Sharding` on your Cloud based Kubernetes cluster. +Deploy Oracle Database Sharding Topology with `System-Managed Sharding` on your Cloud based Kubernetes cluster. In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: -[1. Provisioning Oracle Sharded Database with System Sharding without Database Gold Image](./provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md) -[2. Provisioning Oracle Sharded Database with System Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md) -[3. Provisioning Oracle Sharded Database with System Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) -[4. 
Provisioning Oracle Sharded Database with System Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) -[5. Provisioning Oracle Sharded Database with System Sharding and send Notification using OCI Notification Service](./provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md) -[6. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System Sharding](./provisioning/system_sharding/ssharding_scale_out_add_shards.md) -[7. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System Sharding](./provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md) +[1. Provisioning Oracle Sharded Database with System-Managed Sharding without Database Gold Image](./provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md) +[2. Provisioning Oracle Sharded Database with System-Managed Sharding with number of chunks specified](./provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md) +[3. Provisioning Oracle Sharded Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md) +[4. Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[5. Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[6. 
Provisioning Oracle Sharded Database with System-Managed Sharding and send Notification using OCI Notification Service](./provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md) +[7. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_out_add_shards.md) +[8. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md) ## Provisioning Sharding Topology with User Defined Sharding in a Cloud-Based Kubernetes Cluster @@ -140,6 +161,24 @@ In this example, the deployment uses the YAML file based on `OCI OKE` cluster. T [6. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with User Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md) [7. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with User Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md) + +## Provisioning System-Managed Sharding Topology with Raft replication enabled in a Cloud-Based Kubernetes Cluster + +Deploy Oracle Database Sharding Topology with `System-Managed Sharding with SNR RAFT enabled` on your Cloud based Kubernetes cluster. + +**NOTE: SNR RAFT Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: + +[1. Provisioning System-Managed Sharding Topology with Raft replication enabled without Database Gold Image](./provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md) +[2. 
Provisioning System-Managed Sharding Topology with Raft replication enabled with number of chunks specified](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md) +[3. Provisioning System-Managed Sharding Topology with Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md) +[4. Provisioning System-Managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[5. Provisioning System-Managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[6. Provisioning System-Managed Sharding Topology with Raft replication enabled and send Notification using OCI Notification Service](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md) +[7. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md) +[8. 
Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md) + ## Connecting to Shard Databases After the Oracle Database Sharding Topology has been provisioned using the Sharding Controller in Oracle Database Kubernetes Operator, you can follow the steps in this document to connect to the Sharded Database or to the individual Shards: [Database Connectivity](./provisioning/database_connection.md) diff --git a/docs/sharding/provisioning/debugging.md b/docs/sharding/provisioning/debugging.md index 545bf034..63e02b6a 100644 --- a/docs/sharding/provisioning/debugging.md +++ b/docs/sharding/provisioning/debugging.md @@ -41,3 +41,10 @@ kubectl exec -it catalog-0 -n shns /bin/bash ``` Now, you can troubleshooting the corresponding component using the alert log or the trace files etc just like a normal Sharding Database Deployment. Please refer to [Oracle Database Sharding Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/19/shard/sharding-troubleshooting.html#GUID-629262E5-7910-4690-A726-A565C59BA73E) for this purpose. 
+ + +## Debugging using Database Events + +* You can enable database events as part of the Sharded Database Deployment +* This can be enabled using the `envVars` +* One example of enabling Database Events is [sharding_provisioning_with_db_events.md](./debugging/sharding_provisioning_with_db_events.md) \ No newline at end of file diff --git a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md new file mode 100644 index 00000000..fa73920f --- /dev/null +++ b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md @@ -0,0 +1,40 @@ +# Example of provisioning Oracle Sharded Database along with DB Events set at Database Level + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This example sets a Database Event at the Database Level for Catalog and Shard Databases. + +The sharded database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. + +**NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
+ +This example uses `sharding_provisioning_with_db_events.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Event: `10798 trace name context forever, level 7` set along with `GWM_TRACE level 263` + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `sharding_provisioning_with_db_events.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + + +Use the file: [sharding_provisioning_with_db_events.yaml](./sharding_provisioning_with_db_events.yaml) for this use case as below: + +1. Deploy the `sharding_provisioning_with_db_events.yaml` file: + ```sh + kubectl apply -f sharding_provisioning_with_db_events.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` +3. You can confirm the Database event and the tracing enabled in the RDBMS alert log file of the Database. 
\ No newline at end of file diff --git a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml new file mode 100644 index 00000000..7d136d58 --- /dev/null +++ b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml @@ -0,0 +1,69 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + envVars: + - name: "DB_EVENTS" + value: "10798 trace name context forever, level 7:scope=spfile;immediate trace name GWM_TRACE level 263" + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + envVars: + - name: "DB_EVENTS" + value: "10798 trace name context forever, level 7:scope=spfile;immediate trace name GWM_TRACE level 263" + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + envVars: + - name: "DB_EVENTS" + value: "10798 trace name context forever, level 7:scope=spfile;immediate trace name GWM_TRACE level 263" + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + envVars: + - name: "DB_EVENTS" + value: "10798 trace name context forever, level 7:scope=spfile;immediate trace name GWM_TRACE level 263" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: 
container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md new file mode 100644 index 00000000..61641312 --- /dev/null +++ b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md @@ -0,0 +1,40 @@ +# Example of provisioning Oracle Sharded Database with Oracle 23ai FREE Database and GSM Images + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This example uses the Oracle 23ai FREE Database and GSM Images. + +The sharded database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. + +**NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
+ +This example uses `sharding_provisioning_with_free_images.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` + + +To get the Oracle 23ai FREE Database and GSM Images: + * The Oracle 23ai FREE RDBMS Image used is `container-registry.oracle.com/database/free:latest`. Check [Oracle Database Free Get Started](https://www.oracle.com/database/free/get-started/?source=v0-DBFree-ChatCTA-j2032-20240709) for details. + * To pull the above image from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * Use the Oracle 23ai FREE GSM Binaries `LINUX.X64_234000_gsm.zip` as listed on page [Oracle Database Free Get Started](https://www.oracle.com/database/free/get-started/?source=v0-DBFree-ChatCTA-j2032-20240709) and prepare the GSM Container Image following [Oracle Global Data Services Image](https://github.com/oracle/db-sharding/tree/master/docker-based-sharding-deployment/dockerfiles) + * You need to change `dbImage` and `gsmImage` tag with the images you want to use in your enviornment in file `sharding_provisioning_with_free_images.yaml`. + + + +Use the file: [sharding_provisioning_with_free_images.yaml](./sharding_provisioning_with_free_images.yaml) for this use case as below: + +1. Deploy the `sharding_provisioning_with_free_images.yaml` file: + ```sh + kubectl apply -f sharding_provisioning_with_free_images.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. 
For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` \ No newline at end of file diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml new file mode 100644 index 00000000..7e39b3b2 --- /dev/null +++ b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml @@ -0,0 +1,58 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/free:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: + gsmImagePullSecret: + dbEdition: "free" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md 
b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md new file mode 100644 index 00000000..ba72be25 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -0,0 +1,57 @@ +# Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs) + +**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this test case, you provision the System managed Sharding Topology with Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. + +This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. + +Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup across ADs. + +NOTE: + +* Cloning from Block Volume Backup in OCI enables the new Persistent Volumes to be created in other ADs. +* To specify the AD where you want to provision the database Pod, use the tag `nodeSelector` and the POD will be provisioned in a node running in that AD. +* To specify GSM containers, you can also use the tag `nodeSelector` to specify the AD. +* Before you can provision with the Gold Image, you need the OCID of the Persistent Volume that has the Oracle Database Gold Image. + +1. 
Check the OCID of the Persistent Volume provisioned for the Oracle Database Gold Image: + ```sh + kubectl get pv -n shns + ``` +2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `snr_ssharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. +* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` +* `RAFT Replication` enabled + +NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone_across_ads.yaml`. + * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
+ * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + +Use the file: [snr_ssharding_shard_prov_clone_across_ads.yaml](./snr_ssharding_shard_prov_clone_across_ads.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_clone_across_ads.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_clone_across_ads.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md new file mode 100644 index 00000000..cf4240f7 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -0,0 +1,53 @@ +# Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD) + +**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this test case, you provision the System managed Sharding Topology with Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. 
+ +This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. + +Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup. + +**NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. + +1. Check the OCID of the Persistent Volume provisioned earlier using below command: + + ```sh + kubectl get pv -n shns + ``` + +2. This example uses `snr_ssharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` +* `RAFT Replication` enabled + +NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone.yaml`. 
+ * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + +Use the file: [snr_ssharding_shard_prov_clone.yaml](./snr_ssharding_shard_prov_clone.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_clone.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_clone.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md new file mode 100644 index 00000000..44972090 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md @@ -0,0 +1,43 @@ +# Provisioning System-Managed Sharding Topology with Raft replication enabled with number of chunks specified + +**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
+ +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed with RAFT Replication enabled is deployed using Oracle Sharding controller. + +**NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. + +By default, the System-Managed with RAFT Replication deploys the Sharded Database with 360 chunks per Shard Database (because there are 3 chunks created for each replication unit). In this example, the Sharded Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. + +This example uses `snr_ssharding_shard_prov_chunks.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Total number of chunks as `120` specified by variable `CATALOG_CHUNKS` (it will be 120 chunks per shard) +* Namespace: `shns` +* `RAFT Replication` enabled + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov.yaml`. 
+ * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + + +Use the file: [snr_ssharding_shard_prov_chunks.yaml](./snr_ssharding_shard_prov_chunks.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_chunks.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_chunks.yaml + ``` +1. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md new file mode 100644 index 00000000..9cfd6afb --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md @@ -0,0 +1,47 @@ +# Provisioning System-Managed Sharding Topology with Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods + +**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
+ +In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with System-Managed with RAFT Replication is deployed using Oracle Sharding controller. + +This example uses `snr_ssharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Tags `memory` and `cpu` to control the Memory and CPU of the PODs +* Additional tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level +* `RAFT Replication` enabled + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_memory_cpu.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
+ +**NOTE:** For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. + +Use the YAML file [snr_ssharding_shard_prov_memory_cpu.yaml](./snr_ssharding_shard_prov_memory_cpu.yaml). + +1. Deploy the `snr_ssharding_shard_prov_memory_cpu.yaml` file: + + ```sh + kubectl apply -f snr_ssharding_shard_prov_memory_cpu.yaml + ``` + +1. Check the details of a POD. For example: To check the details of Pod `shard1-0`: + + ```sh + kubectl describe pod/shard1-0 -n shns + ``` +3. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md new file mode 100644 index 00000000..d4cb11de --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md @@ -0,0 +1,87 @@ +# Provisioning System managed Sharding Topology with Raft replication enabled and send Notification using OCI Notification Service + +**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
+ +This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. + +This example uses `snr_ssharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier. +* OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` +* Configmap to send notification email when a particular operation is completed. For example: When a shard is added. +* `RAFT Replication` enabled + +**NOTE:** + +* The notification will be sent using a configmap created with the credentials of the OCI user account in this use case. + +We will create a topic in Notification Service of the OCI Console and use its OCID. + +To do this: + +1. Create a `configmap_data.txt` file, such as the following, which has the OCI User details that will be used to send notfication: + + ```sh + user=ocid1.user.oc1........fx7omxfq + fingerprint=fa:18:98:...............:8a + tenancy=ocid1.tenancy.oc1..aaaa.......orpn7inq + region=us-phoenix-1 + topicid=ocid1.onstopic.oc1.phx.aaa............6xrq + ``` +2. Create a configmap using the below command using the file created above: + ```sh + kubectl create configmap onsconfigmap --from-file=./configmap_data.txt -n shns + ``` + +3. 
Create a key file `privatekey` having the PEM key of the OCI user being used to send notification: + ```sh + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCXYxA0DJvEwtVR + +o4OxrunL3L2NZJRADTFR+TDHqrNF1JwbaFBizSdL+EXbxQW1faZs5lXZ/sVmQF9 + . + . + . + zn/xWC0FzXGRzfvYHhq8XT3omf6L47KqIzqo3jDKdgvVq4u+lb+fXJlhj6Rwi99y + QEp36HnZiUxAQnR331DacN+YSTE+vpzSwZ38OP49khAB1xQsbiv1adG7CbNpkxpI + nS7CkDLg4Hcs4b9bGLHYJVY= + -----END PRIVATE KEY----- + ``` +4. Use the key file `privatekey` to create a Kubernetes secret in namespace `shns`: + + ```sh + kubectl create secret generic my-secret --from-file=./privatekey -n shns + ``` + +5. Use this command to check details of the secret that you created: + + ```sh + kubectl describe secret my-secret -n shns + ``` + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `snr_ssharding_shard_prov_send_notification.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free.
+ +Use the file: [snr_ssharding_shard_prov_send_notification.yaml](./snr_ssharding_shard_prov_send_notification.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_send_notification.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_send_notification.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md new file mode 100644 index 00000000..892741a5 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md @@ -0,0 +1,40 @@ +# Provisioning System-Managed Sharding Topology with Raft replication enabled without Database Gold Image + +**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed with RAFT Replication enabled is deployed using Oracle Sharding controller. + +**NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
+ +This example uses `snr_ssharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* `RAFT Replication` enabled + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + + +Use the file: [snr_ssharding_shard_prov.yaml](./snr_ssharding_shard_prov.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov.yaml + ``` +1. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. 
For example, to check status of pod "shard1-0":
+ kubectl logs -f pod/shard1-0 -n shns
+ ```
diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md
new file mode 100644
index 00000000..fe3157ec
--- /dev/null
+++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md
@@ -0,0 +1,50 @@
+# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled
+
+**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.**
+
+**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller.
+
+This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology with System-Managed with RAFT Replication enabled provisioned using Oracle Database Sharding controller.
+
+**NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard.
+
+In this use case, the existing database Sharding is having:
+
+* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2`
+* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5`
+* One Catalog Pod: `catalog`
+* Namespace: `shns`
+* `RAFT Replication` enabled
+
+In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/)
+ * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
+ * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_delshard.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + +NOTE: Use tag `isDelete: enable` to delete the shard you want. + +This use case deletes the shard `shard4` from the above Sharding Topology. + +Use the file: [snr_ssharding_shard_prov_delshard.yaml](./snr_ssharding_shard_prov_delshard.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_delshard.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_delshard.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + +**NOTE:** After you apply `snr_ssharding_shard_prov_delshard.yaml`, the change may not be visible immediately. When the shard is removed, first the chunks will be moved out of that shard that is going to be deleted. 
+ +To monitor the chunk movement, use the following command: + +```sh +# Switch to the primary GSM Container: +kubectl exec -i -t gsm1-0 -n shns /bin/bash + +# Check the status of the chunks and repeat to observe the chunk movement: +gdsctl config chunks +``` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md new file mode 100644 index 00000000..03423e72 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md @@ -0,0 +1,37 @@ +# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled + +**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with System-Managed with RAFT Replication enabled provisioned earlier using Oracle Database Sharding controller. + +In this use case, the existing Oracle Database sharding topology is having: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Namespace: `shns` +* `RAFT Replication` enabled + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
+ * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_extshard.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + +This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. + +Use the file: [snr_ssharding_shard_prov_extshard.yaml](./snr_ssharding_shard_prov_extshard.yaml) for this use case as below: + +1. Deploy the `snr_ssharding_shard_prov_extshard.yaml` file: + ```sh + kubectl apply -f snr_ssharding_shard_prov_extshard.yaml + ``` +2. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod. For example, to check status of pod "shard4-0": + kubectl logs -f pod/shard4-0 -n shns diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml new file mode 100644 index 00000000..efe3abec --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml @@ -0,0 +1,58 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml new file mode 100644 index 00000000..a79eafdc --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml @@ -0,0 +1,61 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + envVars: + - name: "CATALOG_CHUNKS" + value: "120" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml new file mode 100644 index 00000000..218fda0a --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml @@ -0,0 +1,83 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + 
dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml new file mode 100644 index 00000000..4eb3954a --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml @@ -0,0 +1,91 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + 
nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml new file mode 100644 index 00000000..145ef616 --- /dev/null +++ 
b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml @@ -0,0 +1,69 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard4 + isDelete: enable + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard5 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml new file mode 100644 index 
00000000..ea0c05a5 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml @@ -0,0 +1,68 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard4 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard5 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml new 
file mode 100644 index 00000000..5c15c724 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml @@ -0,0 +1,89 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + envVars: + - name: "INIT_SGA_SIZE" + value: "600" + - name: "INIT_PGA_SIZE" + value: "400" + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + resources: + requests: + memory: "1000Mi" + cpu: "1000m" + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: 
oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml new file mode 100644 index 00000000..50c85443 --- /dev/null +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml @@ -0,0 +1,85 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +--- +apiVersion: database.oracle.com/v1alpha1 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-2" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + 
shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-1" + pvAnnotations: + volume.beta.kubernetes.io/oci-volume-source: ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq + imagePullPolicy: "Always" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + replicationType: "native" + isExternalSvc: False + isDeleteOraPvc: True + isClone: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + nsConfigMap: onsconfigmap + nsSecret: my-secret + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary + namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index 9ae05d50..64e2f4eb 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -1,8 +1,8 @@ -# Provisioning Oracle Sharded Database with System Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) +# Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) 
**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this test case, you provision the Oracle Database sharding topology with System Sharding while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. +In this test case, you provision the Oracle Database sharding topology with System-Managed Sharding while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. @@ -30,6 +30,9 @@ NOTE: NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_clone_across_ads.yaml`. 
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index cb67addb..f7aef949 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Sharded Database with System Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) +# Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -24,6 +24,8 @@ Choosing this option takes substantially less time during the Oracle Database Sh * Namespace: `shns` * Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. 
In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md new file mode 100644 index 00000000..0c6ea8fe --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md @@ -0,0 +1,40 @@ +# Provisioning Oracle Sharded Database with System-Managed Sharding with number of chunks specified + +**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. + +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. + +**NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. + +By default, the System-Managed Sharding deploys the Sharded Database with 120 chunks per Shard Database. If, for example, we have three shards in the Sharded Database, it will be total of 360 chunks. In this example, the Sharded Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. 
+ +This example uses `ssharding_shard_prov_chunks.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: + +* Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` +* Three sharding Pods: `shard1`, `shard2` and `shard3` +* One Catalog Pod: `catalog` +* Total number of chunks as `120` specified by variable `CATALOG_CHUNKS` (it will be 40 chunks per shard) +* Namespace: `shns` + + +In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) + * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `ssharding_shard_prov_chunks.yaml`. + * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + + +Use the file: [ssharding_shard_prov_chunks.yaml](./ssharding_shard_prov_chunks.yaml) for this use case as below: +
+1. Deploy the `ssharding_shard_prov_chunks.yaml` file: + ```sh + kubectl apply -f ssharding_shard_prov_chunks.yaml + ``` +1. Check the status of the deployment: + ```sh + # Check the status of the Kubernetes Pods: + kubectl get all -n shns + + # Check the logs of a particular pod.
For example, to check status of pod "shard1-0": + kubectl logs -f pod/shard1-0 -n shns + ``` diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md index e14e76a4..c4f45a48 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md @@ -1,8 +1,8 @@ -# Provisioning Oracle Sharded Database with System Sharding with additional control on resources like Memory and CPU allocated to Pods +# Provisioning Oracle Sharded Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with System Sharding is deployed using Oracle Sharding controller. +In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. This example uses `ssharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: @@ -17,6 +17,9 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
* If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_memory_cpu.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + + **NOTE:** For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. Use the YAML file [ssharding_shard_prov_memory_cpu.yaml](./ssharding_shard_prov_memory_cpu.yaml). 
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md index 90a8f803..1a6a1ee3 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Sharded Database with System Sharding and send Notification using OCI Notification Service +# Provisioning Oracle Sharded Database with System-Managed Sharding and send Notification using OCI Notification Service **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -65,6 +65,9 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_send_notification.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
+ +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. Use the file: [ssharding_shard_prov_send_notification.yaml](./ssharding_shard_prov_send_notification.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md index 8f10fd8c..b223d1af 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md @@ -1,10 +1,10 @@ -# Provisioning Oracle Sharded Database with System Sharding without Database Gold Image +# Provisioning Oracle Sharded Database with System-Managed Sharding without Database Gold Image **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System Sharding is deployed using Oracle Sharding controller. +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. -**NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
+**NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. This example uses `ssharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: @@ -18,6 +18,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. Use the file: [ssharding_shard_prov.yaml](./ssharding_shard_prov.yaml) for this use case as below: @@ -33,4 +34,4 @@ Use the file: [ssharding_shard_prov.yaml](./ssharding_shard_prov.yaml) for this # Check the logs of a particular pod. 
For example, to check status of pod "shard1-0": kubectl logs -f pod/shard1-0 -n shns - ``` + ``` \ No newline at end of file diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md index 4d5713d4..bca34253 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md @@ -1,8 +1,8 @@ -# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System Sharding +# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology with System Sharding provisioned using Oracle Database Sharding controller. +This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology with System-Managed Sharding provisioned using Oracle Database Sharding controller. **NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard. @@ -17,8 +17,9 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
* If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_delshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. -NOTE: Use tag `isDelete: true` to delete the shard you want. +NOTE: Use tag `isDelete: enable` to delete the shard you want. This use case deletes the shard `shard4` from the above Sharding Topology. diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md index 5c349847..1db8e6c3 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md @@ -1,8 +1,8 @@ -# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System Sharding +# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with System Sharding provisioned earlier using Oracle Database Sharding controller. 
+This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with System-Managed Sharding provisioned earlier using Oracle Database Sharding controller. In this use case, the existing Oracle Database sharding topology is having: @@ -15,6 +15,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. 
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml index f803ba42..7d4e16ec 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml @@ -55,4 +55,3 @@ spec: - name: oltp_ro_svc role: primary namespace: shns - diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml index d2ae0ba9..b7dd1397 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml @@ -58,10 +58,18 @@ spec: imagePullPolicy: "Always" storageSizeInGb: 50 region: primary + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" - name: gsm2 imagePullPolicy: "Always" storageSizeInGb: 50 region: standby + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" storageClass: oci dbImage: container-registry.oracle.com/database/enterprise:latest dbImagePullSecret: ocr-reg-cred @@ -79,4 +87,4 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file + namespace: shns diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml index 5f600d3f..75caca31 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml @@ -26,7 +26,7 @@ spec: shardGroup: shardgroup1 shardRegion: primary - name: shard4 - isDelete: True + isDelete: 
enable storageSizeInGb: 50 imagePullPolicy: "Always" shardGroup: shardgroup1 diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index 0beecbca..9b2905e8 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -37,6 +37,8 @@ In this example, we are using pre-built Oracle Database and Global Data Services * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + Use the file: [udsharding_shard_prov_clone_across_ads.yaml](./udsharding_shard_prov_clone_across_ads.yaml) for this use case as below: 1. 
Deploy the `udsharding_shard_prov_clone_across_ads.yaml` file: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index 445d0105..a4669667 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -33,6 +33,8 @@ In this example, we are using pre-built Oracle Database and Global Data Services * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + Use the file: [udsharding_shard_prov_clone.yaml](./udsharding_shard_prov_clone.yaml) for this use case as below: 1. 
Deploy the `udsharding_shard_prov_clone.yaml` file: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md index d37368f8..b52b8745 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md @@ -18,6 +18,9 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_memory_cpu.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + +**NOTE:** For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. Use the YAML file [udsharding_shard_prov_memory_cpu.yaml](./udsharding_shard_prov_memory_cpu.yaml). 
diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md index c7da6aa5..640301a2 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md @@ -66,6 +66,9 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_send_notification.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
Use the file: [udsharding_shard_prov_send_notification.yaml](./udsharding_shard_prov_send_notification.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md index a275155f..2be5ac9f 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md @@ -19,7 +19,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) - + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
Use the file: [udsharding_shard_prov.yaml](./udsharding_shard_prov.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md index 946a0ab9..2c4cbfc2 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md @@ -4,8 +4,6 @@ This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology with User Defined Sharding provisioned using Oracle Database Sharding controller. -**NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard. - In this use case, the existing database Sharding is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` @@ -18,30 +16,50 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_delshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. -NOTE: Use tag `isDelete: true` to delete the shard you want. 
+**NOTE:** Use tag `isDelete: enable` to delete the shard you want. This use case deletes the shard `shard4` from the above Sharding Topology. Use the file: [udsharding_shard_prov_delshard.yaml](./udsharding_shard_prov_delshard.yaml) for this use case as below: -1. Deploy the `udsharding_shard_prov_delshard.yaml` file: +1. Move out the chunks from the shard to be deleted to another shard. For example, in the current case, before deleting the `shard4`, if you want to move the chunks from `shard4` to `shard2`, then you can run the below `kubectl` command where `/u01/app/oracle/product/23ai/gsmhome_1` is the GSM HOME: + ```sh + kubectl exec -it pod/gsm1-0 -n shns -- /u01/app/oracle/product/23ai/gsmhome_1/bin/gdsctl "move chunk -chunk all -source shard4_shard4pdb -target shard2_shard2pdb" + ``` +2. Confirm the shard to be deleted (`shard4` in this case) is not having any chunk using below command: + ```sh + kubectl exec -it pod/gsm1-0 -n shns -- /u01/app/oracle/product/23ai/gsmhome_1/bin/gdsctl "config chunks" + ``` + If there is no chunk present in the shard to be deleted, you can move to the next step. + +3. Apply the `udsharding_shard_prov_delshard.yaml` file: ```sh kubectl apply -f udsharding_shard_prov_delshard.yaml ``` -2. Check the status of the deployment: +4. Check the status of the deployment: ```sh # Check the status of the Kubernetes Pods: kubectl get all -n shns + ``` -**NOTE:** After you apply `udsharding_shard_prov_delshard.yaml`, the change may not be visible immediately. When the shard is removed, first the chunks will be moved out of that shard that is going to be deleted. +**NOTE:** +- After you apply `udsharding_shard_prov_delshard.yaml`, the change may not be visible immediately and it may take some time for the delete operation to complete. +- If the shard, that you are trying to delete, is still having chunks, then you will see a message like the one below in the logs of the Oracle Database Operator Pod.
+ ```sh + INFO controllers.database.ShardingDatabase manual intervention required + ``` + In this case, you will need to first move out the chunks from the shard to be deleted using Step 2 above and then apply the file in Step 3 to delete that shard. -To monitor the chunk movement, use the following command: +To check the status, use the following command: + ```sh + # Switch to the primary GSM Container: + kubectl exec -i -t gsm1-0 -n shns /bin/bash -```sh -# Switch to the primary GSM Container: -kubectl exec -i -t gsm1-0 -n shns /bin/bash + # Check the status shards: + gdsctl config shard -# Check the status of the chunks and repeat to observe the chunk movement: -gdsctl config chunks -``` + # Check the status of the chunks: + gdsctl config chunks + ``` diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md index e4200d72..20f50b29 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md @@ -16,6 +16,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_extshard.yaml`. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml index 019fc887..9b565b73 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml @@ -56,4 +56,3 @@ spec: - name: oltp_ro_svc role: primary namespace: shns - diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml index f2d10e23..28f36608 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml @@ -58,10 +58,18 @@ spec: imagePullPolicy: "Always" storageSizeInGb: 50 region: primary + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" - name: gsm2 imagePullPolicy: "Always" storageSizeInGb: 50 region: standby + nodeSelector: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" + pvMatchLabels: + "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" storageClass: oci dbImage: container-registry.oracle.com/database/enterprise:latest dbImagePullSecret: 
ocr-reg-cred diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml index 51f1c292..2342dc55 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml @@ -30,7 +30,7 @@ spec: imagePullPolicy: "Always" shardSpace: sspace4 shardRegion: primary - isDelete: True + isDelete: enable - name: shard5 storageSizeInGb: 50 imagePullPolicy: "Always" @@ -66,4 +66,4 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns + namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml index 15022925..e663aa65 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml @@ -87,4 +87,3 @@ spec: - name: oltp_ro_svc role: primary namespace: shns - diff --git a/docs/sidb/README.md b/docs/sidb/README.md index 895c26a9..ff357195 100644 --- a/docs/sidb/README.md +++ b/docs/sidb/README.md @@ -25,6 +25,7 @@ Oracle Database Operator for Kubernetes (`OraOperator`) includes the Single Inst * [Delete a Database](#delete-a-database) * [Advanced Database Configurations](#advanced-database-configurations) * [Run Database with Multiple Replicas](#run-database-with-multiple-replicas) + * [Database Pod Resource Management](#database-pod-resource-management) * [Setup Database with LoadBalancer](#setup-database-with-loadbalancer) * [Enabling TCPS Connections](#enabling-tcps-connections) * [Specifying Custom Ports](#specifying-custom-ports) @@ -628,6 +629,9 @@ The following table depicts the fail over matrix for any destructive 
operation t - If the `ReadWriteOnce` access mode is used, all the replicas will be scheduled on the same node where the persistent volume would be mounted. - If the `ReadWriteMany` access mode is used, all the replicas will be distributed on different nodes. So, it is recommended to have replicas more than or equal to the number of the nodes as the database image is downloaded on all those nodes. This is beneficial in quick cold fail-over scenario (when the active pod dies) as the image would already be available on that node. +#### Database Pod Resource Management +When creating a Single Instance Database you can specify the cpu and memory resources needed by the database pod. These specified resources are passed to the `kube-scheduler` so that the pod gets scheduled on one of the pods that has the required resources available. To use database pod resource management specify values for the `resources` attributes in the [config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml) file, and apply it. + #### Setup Database with LoadBalancer For the Single Instance Database, the default service is the `NodePort` service. You can enable the `LoadBalancer` service by using `kubectl patch` command. 
diff --git a/main.go b/main.go index acdd2719..4174e97d 100644 --- a/main.go +++ b/main.go @@ -249,6 +249,9 @@ func main() { setupLog.Error(err, "unable to create webhook", "webhook", "DataguardBroker") os.Exit(1) } + if err = (&databasev1alpha1.ShardingDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ShardingDatabase") + } if err = (&observabilityv1alpha1.DatabaseObserver{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "DatabaseObserver") os.Exit(1) diff --git a/oracle-database-operator.yaml b/oracle-database-operator.yaml index c2714b6e..504fc7cd 100644 --- a/oracle-database-operator.yaml +++ b/oracle-database-operator.yaml @@ -165,6 +165,8 @@ spec: type: string displayName: type: string + isLongTermBackup: + type: boolean ociConfig: description: "*********************** *\tOCI config ***********************" properties: @@ -173,6 +175,8 @@ spec: secretName: type: string type: object + retentionPeriodInDays: + type: integer target: description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file' properties: @@ -1770,6 +1774,9 @@ spec: asClone: description: Indicate if 'AS CLONE' option should be used in the command to plug in a PDB. This property is applicable when the Action property is PLUG but not required. type: boolean + assertivePdbDeletion: + description: turn on the assertive approach to delete pdb resource kubectl delete pdb ..... 
automatically triggers the pluggable database deletion + type: boolean cdbName: description: Name of the CDB type: string @@ -2077,7 +2084,7 @@ spec: description: PullPolicy describes a policy for if/when to pull a container image type: string isDelete: - type: boolean + type: string label: type: string name: @@ -2140,6 +2147,8 @@ spec: - name type: object type: array + dbEdition: + type: string dbImage: type: string dbImagePullSecret: @@ -2169,6 +2178,8 @@ spec: - name - pwdFileName type: object + fssStorageClass: + type: string gsm: items: description: GsmSpec defines the desired state of GsmSpec @@ -2193,7 +2204,7 @@ spec: description: PullPolicy describes a policy for if/when to pull a container image type: string isDelete: - type: boolean + type: string label: type: string name: @@ -2370,7 +2381,7 @@ spec: isExternalSvc: type: boolean isTdeWallet: - type: boolean + type: string liveinessCheckPeriod: type: integer namespace: @@ -2424,7 +2435,12 @@ spec: description: PullPolicy describes a policy for if/when to pull a container image type: string isDelete: - type: boolean + enum: + - enable + - disable + - failed + - force + type: string label: type: string name: @@ -2507,6 +2523,10 @@ spec: type: string storageClass: type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string required: - catalog - dbImage @@ -2770,6 +2790,23 @@ spec: type: integer replicas: type: integer + resources: + properties: + limits: + properties: + cpu: + type: string + memory: + type: string + type: object + requests: + properties: + cpu: + type: string + memory: + type: string + type: object + type: object serviceAccountName: type: string serviceAnnotations: @@ -3082,6 +3119,21 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - deployments + - events + - pods + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -3112,6 +3164,7 @@ rules: verbs: - get - list + - watch 
- apiGroups: - '''''' resources: @@ -3128,6 +3181,12 @@ rules: - apps resources: - configmaps + verbs: + - get + - list +- apiGroups: + - apps + resources: - deployments - pods - services @@ -3193,6 +3252,26 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - configmaps + - containers + - events + - namespaces + - pods + - pods/exec + - pods/log + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -3489,7 +3568,6 @@ rules: - apiGroups: - monitoring.coreos.com resources: - - prometheusrules - servicemonitors verbs: - create @@ -3784,6 +3862,26 @@ webhooks: resources: - pdbs sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v1alpha1-shardingdatabase + failurePolicy: Fail + name: mshardingdatabase.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - shardingdatabases + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -3805,6 +3903,26 @@ webhooks: resources: - singleinstancedatabases sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-observability-oracle-com-v1alpha1-databaseobserver + failurePolicy: Fail + name: mdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration @@ -3977,6 +4095,27 @@ webhooks: resources: - pdbs sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: 
oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-shardingdatabase + failurePolicy: Fail + name: vshardingdatabase.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - shardingdatabases + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -3999,6 +4138,26 @@ webhooks: resources: - singleinstancedatabases sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-observability-oracle-com-v1alpha1-databaseobserver + failurePolicy: Fail + name: vdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None --- apiVersion: apps/v1 kind: Deployment diff --git a/ords/Dockerfile b/ords/Dockerfile index d4e16b6c..772a7e6d 100644 --- a/ords/Dockerfile +++ b/ords/Dockerfile @@ -1,34 +1,63 @@ -#LICENSE UPL 1.0 -# -# Copyright (c) 1982-2017 Oracle and/or its affiliates. All rights reserved. -# -# ORACLE DOCKERFILES PROJECT -# -------------------------- -# This is the Dockerfile for Oracle Rest Data Services 22.2 -# +## Copyright (c) 2022 Oracle and/or its affiliates. 
+## +## The Universal Permissive License (UPL), Version 1.0 +## +## Subject to the condition set forth below, permission is hereby granted to any +## person obtaining a copy of this software, associated documentation and/or data +## (collectively the "Software"), free of charge and under any and all copyright +## rights in the Software, and any and all patent rights owned or freely +## licensable by each licensor hereunder covering either (i) the unmodified +## Software as contributed to or provided by such licensor, or (ii) the Larger +## Works (as defined below), to deal in both +## +## (a) the Software, and +## (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +## one is included with the Software (each a "Larger Work" to which the Software +## is contributed by such licensors), +## +## without restriction, including without limitation the rights to copy, create +## derivative works of, display, perform, and distribute the Software and make, +## use, sell, offer for sale, import, export, have made, and have sold the +## Software and the Larger Work(s), and to sublicense the foregoing rights on +## either these or other terms. +## +## This license is subject to the following condition: +## The above copyright notice and either this complete permission notice or at +## a minimum a reference to the UPL must be included in all copies or +## substantial portions of the Software. +## +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +## SOFTWARE. 
+ FROM container-registry.oracle.com/java/jdk:latest # Environment variables required for this build (do NOT change) # ------------------------------------------------------------- ENV ORDS_HOME=/opt/oracle/ords/ \ - RUN_FILE="runOrdsSSL.sh" - -#RUN_FILE_NOSSL="runOrdsNOSSL.sh" + RUN_FILE="runOrdsSSL.sh" \ + ORDSVERSION=23.4.0-8 # Copy binaries # ------------- COPY $RUN_FILE $ORDS_HOME -#COPY $RUN_FILE_NOSSL $ORDS_HOME -RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps && \ +RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps curl lsof && \ yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && \ yum -y install java-11-openjdk-devel && \ - yum -y install ords && \ yum -y install iproute && \ yum clean all +RUN curl -o /tmp/ords-$ORDSVERSION.el8.noarch.rpm https://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64/getPackage/ords-$ORDSVERSION.el8.noarch.rpm + +RUN rpm -ivh /tmp/ords-$ORDSVERSION.el8.noarch.rpm + # Setup filesystem and oracle user -# ------------------------------------------------------------ +# -------------------------------- RUN mkdir -p $ORDS_HOME/doc_root && \ mkdir -p $ORDS_HOME/error && \ mkdir -p $ORDS_HOME/secrets && \ @@ -49,5 +78,3 @@ EXPOSE 8888 # Define default command to start Ords Services CMD $ORDS_HOME/$RUN_FILE -## ONLY FOR DEVELOPMENT STAGE -#CMD ["/usr/sbin/init"] diff --git a/ords/runOrdsSSL.sh b/ords/runOrdsSSL.sh index 23b99f1e..35f1b77b 100644 --- a/ords/runOrdsSSL.sh +++ b/ords/runOrdsSSL.sh @@ -1,16 +1,44 @@ #!/bin/bash -# -# Since: June, 2022 -# Author: matteo.malvezzi@oracle.com -# Description: Setup and runs Oracle Rest Data Services 22.2. -# -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. -# -# Copyright (c) 2014-2017 Oracle and/or its affiliates. All rights reserved. 
-# -# MODIFIED (DD-Mon-YY) -# mmalvezz 25-Jun-22 - Initial version -# mmalvezz 17-Oct-22 - db.customURL utilization + +cat <$TNSNAME - - function SetParameter() { ##ords config info <--- Use this command to get the list @@ -67,65 +91,16 @@ function SetParameter() { $ORDS --config ${CONFIG} config set misc.pagination.maxRows 1000 $ORDS --config ${CONFIG} config set db.cdb.adminUser "${CDBADMIN_USER:-C##DBAPI_CDB_ADMIN} AS SYSDBA" $ORDS --config ${CONFIG} config secret --password-stdin db.cdb.adminUser.password << EOF -${CDBADMIN_PWD:-WElcome_12##} +${CDBADMIN_PWD:-PROVIDE_A_PASSWORD} EOF -## $ORDS --config ${CONFIG} config set db.username "SYS AS SYSDBA" -## $ORDS --config ${CONFIG} config secret --password-stdin db.password <$PASSFILE -welcome1 -EOF - -## $JAVA_HOME/bin/keytool -genkey -keyalg RSA -alias selfsigned -keystore keystore.jks \ -## -dname "CN=${HN}, OU=Example Department, O=Example Company, L=Birmingham, ST=West Midlands, C=GB" \ -## -storepass welcome1 -validity 3600 -keysize 2048 -keypass welcome1 -## -## -## $JAVA_HOME/bin/keytool -importkeystore -srckeystore keystore.jks -srcalias selfsigned -srcstorepass welcome1 \ -## -destkeystore keystore.p12 -deststoretype PKCS12 -deststorepass welcome1 -destkeypass welcome1 -## -## -## ${OPENSSL} pkcs12 -in ${KEYSTORE}/keystore.p12 -nodes -nocerts -out ${KEYSTORE}/${HN}-key.pem -passin file:${PASSFILE} -## ${OPENSSL} pkcs12 -in ${KEYSTORE}/keystore.p12 -nokeys -out ${KEYSTORE}/${HN}.pem -passin file:${PASSFILE} -## ${OPENSSL} pkcs8 -topk8 -inform PEM -outform DER -in ${HN}-key.pem -out ${HN}-key.der -nocrypt -## ${OPENSSL} x509 -inform PEM -outform DER -in ${HN}.pem -out ${HN}.der - - - - - - - - -rm $PASSFILE -ls -ltr $KEYSTORE - - - -} - - function setupOrds() { echo "====================================================" @@ -163,7 +138,6 @@ export ORDS_LOGS=/tmp ORDS_PASSWORD=`cat $ORDS_HOME/secrets/$ORDS_PWD_KEY` } -setupHTTPS; SetParameter; $ORDS --config ${CONFIG} install \ @@ -173,8 +147,8 @@ $ORDS 
--config ${CONFIG} install \ --log-folder ${ORDS_LOGS} \ --proxy-user \ --password-stdin <${CKF} 2>&1 +echo "checkfile" >> ${CKF} +NOT_INSTALLED=`cat ${CKF} | grep "INFO: The" |wc -l ` echo NOT_INSTALLED=$NOT_INSTALLED + function StartUp () { $ORDS --config $CONFIG serve --port 8888 --secure } From ec60d939f6cbcc35e015d757237af04056eb9c54 Mon Sep 17 00:00:00 2001 From: Oleksandra Pavlusieva Date: Wed, 4 Sep 2024 12:15:10 +0300 Subject: [PATCH 08/24] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 959045cb..26c4ead8 100644 --- a/README.md +++ b/README.md @@ -126,8 +126,8 @@ Oracle strongly recommends that you ensure your system meets the following [Prer ```sh kubectl apply -f rbac/node-rbac.yaml ``` -# Installation -## Install Oracle DB Operator +## Installation +### Install Oracle DB Operator After you have completed the preceding prerequisite changes, you can install the operator. To install the operator in the cluster quickly, you can apply the modified `oracle-database-operator.yaml` file from the preceding step. From ccc898289059345c37c68a09df11762dba5acda9 Mon Sep 17 00:00:00 2001 From: Saurabh Ahuja Date: Wed, 4 Sep 2024 15:21:13 +0530 Subject: [PATCH 09/24] Update README.md --- README.md | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 26c4ead8..d707bc87 100644 --- a/README.md +++ b/README.md @@ -154,7 +154,7 @@ Oracle strongly recommends that you ensure your system meets the following [Prer You should see that the operator is up and running, along with the shipped controllers. For more details, see [Oracle Database Operator Installation Instructions](./docs/installation/OPERATOR_INSTALLATION_README.md). 
- +## Documentation ## Getting Started with the Operator (Quickstart) The following quickstarts are designed for specific database configurations: @@ -170,6 +170,7 @@ The following quickstarts are designed for specific database configurations: The following quickstart is designed for non-database configurations: * [Oracle Database Observability](./docs/observability/README.md) +## Examples YAML file templates are available under [`/config/samples`](./config/samples/). You can copy and edit these template files to configure them for your use cases. ## Uninstall the Operator @@ -221,15 +222,15 @@ YAML file templates are available under [`/config/samples`](./config/samples/). ## Contributing -See [Contributing to this Repository](./CONTRIBUTING.md) +This project welcomes contributions from the community. Before submitting a pull request, please [review our contribution guide](./CONTRIBUTING.md) -## Support +## Help You can submit a GitHub issue, oir submit an issue and then file an [Oracle Support service](https://support.oracle.com/portal/) request. To file an issue or a service request, use the following product ID: 14430. ## Security -Secure platforms are an important basis for general system security. Ensure that your deployment is in compliance with common security practices. +Please consult the [security guide](./SECURITY.md) for our responsible security vulnerability disclosure process ### Managing Sensitive Data @@ -244,10 +245,6 @@ The following is an example of a YAML file fragment for specifying Oracle Cloud Examples in this repository where passwords are entered on the command line are for demonstration purposes only. -### Reporting a Security Issue - -See [Reporting security vulnerabilities](./SECURITY.md) - ## License Copyright (c) 2022, 2024 Oracle and/or its affiliates. 
From 29a38c49fced4fc626e17d3b82f6326180551ee0 Mon Sep 17 00:00:00 2001 From: Saurabh Ahuja Date: Wed, 4 Sep 2024 15:23:02 +0530 Subject: [PATCH 10/24] Update SECURITY.md --- SECURITY.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index 30159518..2ca81027 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,4 +1,4 @@ -# Oracle's instructions for reporting security vulnerabilities +# Reporting security vulnerabilities Oracle values the independent security research community and believes that responsible disclosure of security vulnerabilities helps us ensure the security @@ -21,7 +21,7 @@ security features are welcome on GitHub Issues. Security updates will be released on a regular cadence. Many of our projects will typically release security fixes in conjunction with the -[Oracle Critical Patch Update][3] program. Additional +Oracle Critical Patch Update program. Additional information, including past advisories, is available on our [security alerts][4] page. @@ -35,4 +35,4 @@ sufficiently hardened for production use. 
[1]: mailto:secalert_us@oracle.com [2]: https://www.oracle.com/corporate/security-practices/assurance/vulnerability/reporting.html [3]: https://www.oracle.com/security-alerts/encryptionkey.html -[4]: https://www.oracle.com/security-alerts/ \ No newline at end of file +[4]: https://www.oracle.com/security-alerts/ From 2479f3c2eb8092d6cbbeccc9502d884fa9705c56 Mon Sep 17 00:00:00 2001 From: jpverma85 Date: Fri, 6 Sep 2024 15:04:55 -0400 Subject: [PATCH 11/24] gdd-changes (#146) --- PREREQUISITES.md | 6 +- README.md | 10 +- docs/sharding/README.md | 119 +++++++++--------- .../provisioning/database_connection.md | 6 +- docs/sharding/provisioning/debugging.md | 8 +- .../sharding_provisioning_with_db_events.md | 8 +- .../sharding_provisioning_with_free_images.md | 8 +- ...harding_provisioning_with_free_images.yaml | 4 +- ..._persistent_volume_having_db_gold_image.md | 2 +- ...y_cloning_db_from_gold_image_across_ads.md | 14 +-- ...ing_by_cloning_db_gold_image_in_same_ad.md | 14 +-- ...ding_provisioning_with_chunks_specified.md | 10 +- ..._provisioning_with_control_on_resources.md | 6 +- ...ith_notification_using_oci_notification.md | 12 +- ...ding_provisioning_without_db_gold_image.md | 8 +- ...rding_scale_in_delete_an_existing_shard.md | 6 +- .../snr_ssharding_scale_out_add_shards.md | 10 +- ...y_cloning_db_from_gold_image_across_ads.md | 10 +- ...ing_by_cloning_db_gold_image_in_same_ad.md | 8 +- ...ding_provisioning_with_chunks_specified.md | 6 +- ..._provisioning_with_control_on_resources.md | 4 +- ...ith_notification_using_oci_notification.md | 8 +- ...ding_provisioning_without_db_gold_image.md | 4 +- ...rding_scale_in_delete_an_existing_shard.md | 8 +- .../ssharding_scale_out_add_shards.md | 10 +- ...y_cloning_db_from_gold_image_across_ads.md | 14 +-- ...ing_by_cloning_db_gold_image_in_same_ad.md | 12 +- ..._provisioning_with_control_on_resources.md | 8 +- ...ith_notification_using_oci_notification.md | 12 +- ...ding_provisioning_without_db_gold_image.md | 10 +- 
...rding_scale_in_delete_an_existing_shard.md | 10 +- .../udsharding_scale_out_add_shards.md | 14 +-- 32 files changed, 195 insertions(+), 194 deletions(-) diff --git a/PREREQUISITES.md b/PREREQUISITES.md index 01bb94b4..bc333357 100644 --- a/PREREQUISITES.md +++ b/PREREQUISITES.md @@ -6,7 +6,7 @@ Oracle Database Operator for Kubernetes (OraOperator) manages all Cloud deployme * Oracle Autonomous Database (ADB) * Containerized Oracle Database Single Instance (SIDB) -* Containerized Sharded Oracle Database (SHARDING) +* Containerized Oracle Globally Distributed Database (GDD) ### Setting Up a Kubernetes Cluster and Volumes Review and complete each step as needed. @@ -29,6 +29,6 @@ If you intent to use `OraOperator` to handle Oracle Autonomous Database lifecycl If you intent to use `OraOperator` to handle Oracle Database Single Instance lifecycles, then read [Single Instance Database Prerequisites](./docs/sidb/PREREQUISITES.md) -### Prerequites for Sharded Databases (SHARDING) +### Prerequites for Oracle Globally Distributed Databases(GDD) - If you intent to use OraOperator to handle the lifecycle of Oracle Database deployed with Oracle Sharding, then read [Sharded Database Prerequisites](./docs/sharding/README.md#prerequsites-for-running-oracle-sharding-database-controller) + If you intent to use OraOperator to handle the lifecycle of Oracle Globally Distributed Database(GDD), then read [Oracle Globally Distributed Database Prerequisites](./docs/sharding/README.md#prerequsites-for-running-oracle-sharding-database-controller) diff --git a/README.md b/README.md index d707bc87..3409463b 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ In this v1.1.0 production release, `OraOperator` supports the following database * Oracle Autonomous Database on dedicated Cloud infrastructure (ADB-D) * Oracle Autonomous Container Database (ACD) (infrastructure) is the infrastructure for provisioning Autonomous Databases. 
* Containerized Single Instance databases (SIDB) deployed in the Oracle Kubernetes Engine (OKE) and any k8s where OraOperator is deployed -* Containerized Sharded databases (SHARDED) deployed in OKE and any k8s where OraOperator is deployed +* Containerized Oracle Globally Distributed Databases(GDD) deployed in OKE and any k8s where OraOperator is deployed * Oracle Multitenant Databases (CDB/PDBs) * Oracle Base Database Cloud Service (BDBCS) * Oracle Data Guard (Preview status) @@ -23,7 +23,7 @@ Oracle will continue to extend `OraOperator` to support additional Oracle Databa * Namespace scope deployment option * Enhanced security with namespace scope deployment option * Support for Oracle Database 23ai Free (with SIDB) -* Automatic Storage Expansion for SIDB and Sharded DB +* Automatic Storage Expansion for SIDB and Oracle Globally Distributed Database * User-Defined Sharding * TCPS support customer provided certs * Execute custom scripts during DB setup/startup @@ -41,7 +41,7 @@ This release of Oracle Database Operator for Kubernetes (the operator) supports * ADB-S/ADB-D: Provision, bind, start, stop, terminate (soft/hard), scale (up/down), long-term backup, manual restore * ACD: provision, bind, restart, terminate (soft/hard) * SIDB: Provision, clone, patch (in-place/out-of-place), update database initialization parameters, update database configuration (Flashback, archiving), Oracle Enterprise Manager (EM) Express (a basic observability console), Oracle REST Data Service (ORDS) to support REST based SQL, PDB management, SQL Developer Web, and Application Express (Apex) -* SHARDED: Provision/deploy sharded databases and the shard topology, Add a new shard, Delete an existing shard +* GDD: Provision/deploy Oracle Globally Distributed Databases and the GDD topology, Add a new shard, Delete an existing shard * Oracle Multitenant Database: Bind to a CDB, Create a  PDB, Plug a  PDB, Unplug a PDB, Delete a PDB, Clone a PDB, Open/Close a PDB * Oracle Base Database Cloud 
Service (BDBCS): provision, bind, scale shape Up/Down, Scale Storage Up, Terminate and Update License * Oracle Data Guard: Provision a Standby for the SIDB resource, Create a Data Guard Configuration, Perform a Switchover, Patch Primary and Standby databases in Data Guard Configuration @@ -162,7 +162,7 @@ The following quickstarts are designed for specific database configurations: * [Oracle Autonomous Database](./docs/adb/README.md) * [Oracle Autonomous Container Database](./docs/adb/ACD.md) * [Containerized Oracle Single Instance Database and Data Guard](./docs/sidb/README.md) -* [Containerized Oracle Sharded Database](./docs/sharding/README.md) +* [Containerized Oracle Globally Distributed Database](./docs/sharding/README.md) * [Oracle Multitenant Database](./docs/multitenant/README.md) * [Oracle Base Database Cloud Service (BDBCS)](./docs/dbcs/README.md) @@ -217,7 +217,7 @@ YAML file templates are available under [`/config/samples`](./config/samples/). * [Oracle Autonomous Database](https://docs.oracle.com/en-us/iaas/Content/Database/Concepts/adboverview.htm) * [Components of Dedicated Autonomous Database](https://docs.oracle.com/en-us/iaas/autonomous-database/doc/components.html) * [Oracle Database Single Instance](https://docs.oracle.com/en/database/oracle/oracle-database/) -* [Oracle Database Sharding](https://docs.oracle.com/en/database/oracle/oracle-database/21/shard/index.html) +* [Oracle Globally Distributed Database](https://docs.oracle.com/en/database/oracle/oracle-database/21/shard/index.html) * [Oracle Database Cloud Service](https://docs.oracle.com/en/database/database-cloud-services.html) ## Contributing diff --git a/docs/sharding/README.md b/docs/sharding/README.md index 0c817467..a5c7c470 100644 --- a/docs/sharding/README.md +++ b/docs/sharding/README.md @@ -1,36 +1,36 @@ -# Using Oracle Sharding with Oracle Database Operator for Kubernetes +# Using Oracle Globally Distributed Database with Oracle Database Operator for Kubernetes -Oracle Sharding 
distributes segments of a data set across many databases (shards) on different computers, either on-premises or in cloud. Sharding enables globally distributed, linearly scalable, multimodel databases. It requires no specialized hardware or software. Oracle Sharding does all this while rendering the strong consistency, full power of SQL, support for structured and unstructured data, and the Oracle Database ecosystem. It meets data sovereignty requirements, and supports applications that require low latency and high availability. +Oracle Globally Distributed Database distributes segments of a data set across many databases (shards) on different computers, either on-premises or in cloud. This feature enables globally distributed, linearly scalable, multimodel databases. It requires no specialized hardware or software. Oracle Globally Distributed Database does all this while rendering the strong consistency, full power of SQL, support for structured and unstructured data, and the Oracle Database ecosystem. It meets data sovereignty requirements, and supports applications that require low latency and high availability. -All of the shards together make up a single logical database, which is referred to as a sharded database (SDB). +All of the shards together make up a single logical database, which is referred to as a Oracle Globally Distributed Database (GDD). -Kubernetes provides infrastructure building blocks, such as compute, storage, and networks. Kubernetes makes the infrastructure available as code. It enables rapid provisioning of multi-node topolgies. Additionally, Kubernetes also provides statefulsets, which are the workload API objects that are used to manage stateful applications. This provides us lifecycle management elasticity for databases as a stateful application for various database topologies, such as sharded databases, Oracle Real Application Clusters (Oracle RAC), single instance Oracle Database, and other Oracle features and configurations. 
+Kubernetes provides infrastructure building blocks, such as compute, storage, and networks. Kubernetes makes the infrastructure available as code. It enables rapid provisioning of multi-node topologies. Additionally, Kubernetes also provides statefulsets, which are the workload API objects that are used to manage stateful applications. This provides us lifecycle management elasticity for databases as a stateful application for various database topologies, such as Oracle Globally Distributed Database, Oracle Real Application Clusters (Oracle RAC), single instance Oracle Database, and other Oracle features and configurations. -The Sharding Database controller in Oracle Database Operator deploys Oracle Sharding topology as a statefulset in the Kubernetes clusters, using Oracle Database and Global Data Services Docker images. The Oracle Sharding database controller manages the typical lifecycle of Oracle Sharding topology in the Kubernetes cluster, as shown below: +The Sharding Database controller in Oracle Database Operator deploys Oracle Globally Distributed Database Topology as a statefulset in the Kubernetes clusters, using Oracle Database and Global Data Services Docker images. The Oracle Sharding database controller manages the typical lifecycle of Oracle Globally Distributed Database topology in the Kubernetes cluster, as shown below: * Create primary statefulsets shards * Create master and standby Global Data Services statefulsets * Create persistent storage, along with statefulset * Create services * Create load balancer service -* Provision sharding topology by creating and configuring the following: +* Provision Oracle Globally Distributed Database topology by creating and configuring the following: * Catalog database * Shard Databases * GSMs * Shard scale up and scale down * Shard topology cleanup -The Oracle Sharding database controller provides end-to-end automation of Oracle Database sharding topology deployment in Kubernetes clusters.
+The Oracle Sharding database controller provides end-to-end automation of Oracle Globally Distributed Database topology deployment in Kubernetes clusters. ## Using Oracle Database Operator Sharding Controller -Following sections provide the details for deploying Oracle Globally Distributed Database (Oracle Sharded Database) using Oracle Database Operator Sharding Controller with different use cases: +Following sections provide the details for deploying Oracle Globally Distributed Database using Oracle Database Operator Sharding Controller with different use cases: * [Prerequisites for running Oracle Sharding Database Controller](#prerequisites-for-running-oracle-sharding-database-controller) * [Oracle Database 23ai Free](#oracle-database-23ai-free) -* [Provisioning Sharding Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-sharding-topology-with-system-managed-sharding-in-a-cloud-based-kubernetes-cluster) -* [Provisioning Sharding Topology with User Defined Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-sharding-topology-with-user-defined-sharding-in-a-cloud-based-kubernetes-cluster) -* [Provisioning System-Managed Sharding Topology with Raft replication enabled in a Cloud-Based Kubernetes Cluster](#provisioning-system-managed-sharding-topology-with-raft-replication-enabled-in-a-cloud-based-kubernetes-cluster) +* [Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-oracle-globally-distributed-database-topology-with-system-managed-sharding-in-a-cloud-based-kubernetes-cluster) +* [Provisioning Oracle Globally Distributed Database Topology with User-Defined Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-oracle-globally-distributed-database-topology-with-user-defined-sharding-in-a-cloud-based-kubernetes-cluster) +* [Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled in a
Cloud-Based Kubernetes Cluster](#provisioning-oracle-globally-distributed-database-topology-with-system-managed-sharding-and-raft-replication-enabled-in-a-cloud-based-kubernetes-cluster) * [Connecting to Shard Databases](#connecting-to-shard-databases) * [Debugging and Troubleshooting](#debugging-and-troubleshooting) @@ -72,8 +72,9 @@ Choose one of the following deployment options: **Use Oracle-Supplied Docker Images:** The Oracle Sharding Database controller uses Oracle Global Data Services and Oracle Database images to provision the sharding topology. - You can also download the pre-built Oracle Global Data Services `container-registry.oracle.com/database/gsm:latest` and Oracle Database images `container-registry.oracle.com/database/enterprise:latest` from [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). These images are functionally tested and evaluated with various use cases of sharding topology by deploying on OKE and OLCNE. - + You can also download the pre-built Oracle Global Data Services `container-registry.oracle.com/database/gsm:latest` and Oracle Database images `container-registry.oracle.com/database/enterprise:latest` from [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). These images are functionally tested and evaluated with various use cases of Oracle Globally Distributed Database topology by deploying on OKE and OLCNE. + + **Note:** You will need to accept Agreement from container-registry.oracle.com to be able to pull the pre-built container images.
**OR** @@ -82,17 +83,17 @@ Choose one of the following deployment options: * [Oracle Global Data Services Image](https://github.com/oracle/db-sharding/tree/master/docker-based-sharding-deployment/dockerfiles) * [Oracle Database Image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance) -After the images are ready, push them to your Docker Images Repository, so that you can pull them during Oracle Database Sharding topology provisioning. +After the images are ready, push them to your Docker Images Repository, so that you can pull them during Oracle Globally Distributed Database topology provisioning. You can either download the images and push them to your Docker Images Repository, or, if your Kubernetes cluster can reach OCR, you can download these images directly from OCR. -**Note**: In the sharding example yaml files, we are using GDS and database images available on [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). +**Note**: In the Oracle Globally Distributed Database Topology example yaml files, we are using GDS and database images available on [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). **Note:** In case you want to use the `Oracle Database 23ai Free` Image for Database and GSM, refer to section [Oracle Database 23ai Free](#oracle-database-23ai-free) for more details. -### 4. Create a namespace for the Oracle DB Sharding Setup +### 4. Create a namespace for the Oracle Globally Distributed Database Setup - Create a Kubernetes namespace named `shns`. All the resources belonging to the Oracle Database Sharding Setup will be provisioned in this namespace named `shns`. For example: + Create a Kubernetes namespace named `shns`. All the resources belonging to the Oracle Globally Distributed Database Topology Setup will be provisioned in this namespace named `shns`. 
For example: ```sh #### Create the namespace @@ -102,11 +103,11 @@ You can either download the images and push them to your Docker Images Repositor kubectl get ns ``` -### 5. Create a Kubernetes secret for the database installation owner for the database Sharding Deployment +### 5. Create a Kubernetes secret for the database installation owner for the Oracle Globally Distributed Database Topology Deployment Create a Kubernetes secret named `db-user-pass-rsa` using these steps: [Create Kubernetes Secret](./provisioning/create_kubernetes_secret_for_db_user.md) -After you have the above prerequisites completed, you can proceed to the next section for your environment to provision the Oracle Database Sharding Topology. +After you have the above prerequisites completed, you can proceed to the next section for your environment to provision the Oracle Globally Distributed Database Topology. ### 6. Provisioning a Persistent Volume having an Oracle Database Gold Image @@ -116,73 +117,73 @@ In case of an `OCI OKE` cluster, you can use this Persistent Volume during provi You can refer [here](./provisioning/provisioning_persistent_volume_having_db_gold_image.md) for the steps involved. -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. So, this step will not be needed if you are deploying Oracle Sharded Database using Oracle 23ai Free Database and GSM Images. +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. So, this step will not be needed if you are deploying Oracle Globally Distributed Database using Oracle 23ai Free Database and GSM Images. ## Oracle Database 23ai Free Please refer to [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) documentation for more details. 
-If you want to use Oracle Database 23ai Free Image for Database and GSM for deployment of the Sharded Database using Sharding Controller in Oracle Database Kubernetes Operator, you need to consider the below points: +If you want to use Oracle Database 23ai Free Image for Database and GSM for deployment of the Oracle Globally Distributed Database using Sharding Controller in Oracle Database Kubernetes Operator, you need to consider the below points: * To deploy using the FREE Database and GSM Image, you will need to add the additional parameter `dbEdition: "free"` to the .yaml file. -* Refer to [Sample Sharded Database Deployment using Oracle 23ai FREE Database and GSM Images](./provisioning/free/sharding_provisioning_with_free_images.md) for an example. +* Refer to [Sample Oracle Globally Distributed Database Deployment using Oracle 23ai FREE Database and GSM Images](./provisioning/free/sharding_provisioning_with_free_images.md) for an example. * For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. -* Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +* Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. * Total number of chunks for FREE Database defaults to `12` if `CATALOG_CHUNKS` parameter is not specified. This default value is determined considering limitation of 12 GB of user data on disk for oracle free database. 
-## Provisioning Sharding Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster +## Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster -Deploy Oracle Database Sharding Topology with `System-Managed Sharding` on your Cloud based Kubernetes cluster. +Deploy Oracle Globally Distributed Database Topology with `System-Managed Sharding` on your Cloud based Kubernetes cluster. -In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Globally Distributed Database Topology covered by below examples: -[1. Provisioning Oracle Sharded Database with System-Managed Sharding without Database Gold Image](./provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md) -[2. Provisioning Oracle Sharded Database with System-Managed Sharding with number of chunks specified](./provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md) -[3. Provisioning Oracle Sharded Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md) -[4. Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) -[5. Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) -[6. 
Provisioning Oracle Sharded Database with System-Managed Sharding and send Notification using OCI Notification Service](./provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md) -[7. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_out_add_shards.md) -[8. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md) +[1. Provisioning Oracle Globally Distributed Database with System-Managed Sharding without Database Gold Image](./provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md) +[2. Provisioning Oracle Globally Distributed Database with System-Managed Sharding with number of chunks specified](./provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md) +[3. Provisioning Oracle Globally Distributed Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md) +[4. Provisioning Oracle Globally Distributed Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[5. Provisioning Oracle Globally Distributed Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[6. 
Provisioning Oracle Globally Distributed Database with System-Managed Sharding and send Notification using OCI Notification Service](./provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md) +[7. Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_out_add_shards.md) +[8. Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md) -## Provisioning Sharding Topology with User Defined Sharding in a Cloud-Based Kubernetes Cluster +## Provisioning Oracle Globally Distributed Database Topology with User-Defined Sharding in a Cloud-Based Kubernetes Cluster -Deploy Oracle Database Sharding Topology with `User Defined Sharding` on your Cloud based Kubernetes cluster. +Deploy Oracle Globally Distributed Database Topology with `User-Defined Sharding` on your Cloud based Kubernetes cluster. -In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Globally Distributed Database Topology covered by below examples: -[1. Provisioning Oracle Sharded Database with User Defined Sharding without Database Gold Image](./provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md) -[2. Provisioning Oracle Sharded Database with User Defined Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md) -[3. 
Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) -[4. Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) -[5. Provisioning Oracle Sharded Database with User Defined Sharding and send Notification using OCI Notification Service](./provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md) -[6. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with User Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md) -[7. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with User Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md) +[1. Provisioning Oracle Globally Distributed Database with User-Defined Sharding without Database Gold Image](./provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md) +[2. Provisioning Oracle Globally Distributed Database with User-Defined Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md) +[3. Provisioning Oracle Globally Distributed Database with User-Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[4. 
Provisioning Oracle Globally Distributed Database with User-Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[5. Provisioning Oracle Globally Distributed Database with User-Defined Sharding and send Notification using OCI Notification Service](./provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md) +[6. Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with User-Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md) +[7. Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with User-Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md) -## Provisioning System-Managed Sharding Topology with Raft replication enabled in a Cloud-Based Kubernetes Cluster +## Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled in a Cloud-Based Kubernetes Cluster -Deploy Oracle Database Sharding Topology with `System-Managed Sharding with SNR RAFT enabled` on your Cloud based Kubernetes cluster. +Deploy Oracle Globally Distributed Database Topology with `System-Managed Sharding` and with `RAFT Replication` enabled on your Cloud based Kubernetes cluster. -**NOTE: SNR RAFT Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** -In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. 
There are multiple use case possible for deploying the Oracle Globally Distributed Database Topology covered by below examples: -[1. Provisioning System-Managed Sharding Topology with Raft replication enabled without Database Gold Image](./provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md) -[2. Provisioning System-Managed Sharding Topology with Raft replication enabled with number of chunks specified](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md) -[3. Provisioning System-Managed Sharding Topology with Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md) -[4. Provisioning System-Managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) -[5. Provisioning System-Managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) -[6. Provisioning System-Managed Sharding Topology with Raft replication enabled and send Notification using OCI Notification Service](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md) -[7. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md) -[8. 
Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT reolication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md) +[1. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled without Database Gold Image](./provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md) +[2. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled with number of chunks specified](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md) +[3. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md) +[4. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[5. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[6. Provisioning Oracle Globally Distributed Database Topology with System-Managed Sharding and Raft replication enabled and send Notification using OCI Notification Service](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md) +[7. 
Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding and RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md) +[8. Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding and RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md) -## Connecting to Shard Databases +## Connecting to Oracle Globally Distributed Database -After the Oracle Database Sharding Topology has been provisioned using the Sharding Controller in Oracle Database Kubernetes Operator, you can follow the steps in this document to connect to the Sharded Database or to the individual Shards: [Database Connectivity](./provisioning/database_connection.md) +After the Oracle Globally Distributed Database Topology has been provisioned using the Sharding Controller in Oracle Database Kubernetes Operator, you can follow the steps in this document to connect to the Oracle Globally Distributed Database or to the individual Shards: [Database Connectivity](./provisioning/database_connection.md) ## Debugging and Troubleshooting -To debug the Oracle Database Sharding Topology provisioned using the Sharding Controller of Oracle Database Kubernetes Operator, follow this document: [Debugging and troubleshooting](./provisioning/debugging.md) +To debug the Oracle Globally Distributed Database Topology provisioned using the Sharding Controller of Oracle Database Kubernetes Operator, follow this document: [Debugging and troubleshooting](./provisioning/debugging.md) diff --git a/docs/sharding/provisioning/database_connection.md b/docs/sharding/provisioning/database_connection.md index 7f64bbd5..5671520b 100644 --- a/docs/sharding/provisioning/database_connection.md +++ b/docs/sharding/provisioning/database_connection.md @@ -1,10 +1,10 @@ # Database Connectivity -The
Oracle Database Sharding Topology deployed by Sharding Controller in Oracle Database Operator has an external IP available for each of the container. +The Oracle Globally Distributed Database Topology deployed by Sharding Controller in Oracle Database Operator has an external IP available for each of the container. ## Below is an example setup with connection details -Check the details of the Sharding Topology provisioned using Sharding Controller: +Check the details of the Oracle Globally Distributed Database Topology provisioned using Sharding Controller: ```sh $ kubectl get all -n shns @@ -40,5 +40,5 @@ After you have the external IP address, you can use the services shown below to 1. **Direct connection to the CATALOG Database**: Connect to the service `catalogpdb` on catalog container external IP `xx.xx.xx.116` on port `1521` 2. **Direct connection to the shard Database SHARD1**: Connect to the service `shard1pdb` on catalog container external IP `xx.xx.xx.187` on port `1521` 3. **Direct connection to the shard Database SHARD2**: Connect to the service `shard2pdb` on catalog container external IP `xx.xx.xx.197` on port `1521` -4. **Connection to SHARDED Database for DML activity (INSERT/UPDATE/DELETE)**: Connect to the service `oltp_rw_svc.catalog.oradbcloud` either on primary gsm GSM1 container external IP `xx.xx.xx.38` on port `1522` **or** on standby gsm GSM2 container external IP `xx.xx.xx.66` on port `1522` +4. **Connection to Oracle Globally Distributed Database for DML activity (INSERT/UPDATE/DELETE)**: Connect to the service `oltp_rw_svc.catalog.oradbcloud` either on primary gsm GSM1 container external IP `xx.xx.xx.38` on port `1522` **or** on standby gsm GSM2 container external IP `xx.xx.xx.66` on port `1522` 5. 
**Connection to the catalog database for DDL activity**: Connect to the service `GDS$CATALOG.oradbcloud` on catalog container external IP `xx.xx.xx.116` on port `1521` diff --git a/docs/sharding/provisioning/debugging.md b/docs/sharding/provisioning/debugging.md index 63e02b6a..330cfc0e 100644 --- a/docs/sharding/provisioning/debugging.md +++ b/docs/sharding/provisioning/debugging.md @@ -1,6 +1,6 @@ # Debugging and Troubleshooting -When the Oracle Database Sharding Topology is provisioned using the Oracle Database Kubernetes Operator, the debugging of an issue with the deployment depends on at which stage the issue has been seen. +When the Oracle Globally Distributed Database Topology is provisioned using the Oracle Database Kubernetes Operator, the debugging of an issue with the deployment depends on at which stage the issue has been seen. Below are the possible cases and the steps to debug such an issue: @@ -24,7 +24,7 @@ kubectl describe pod/catalog-0 -n shns In case the failure is related to the Cloud Infrastructure, you will need to troubleshooting that using the documentation from the cloud provider. -## Failure in the provisioning of the Sharded Database +## Failure in the provisioning of the Oracle Globally Distributed Database In case the failure occures after the Kubernetes Pods are created but during the execution of the scripts to create the shard databases, catalog database or the GSM, you will need to trobleshoot that at the individual Pod level. @@ -40,11 +40,11 @@ To check the logs at the GSM or at the Database level or at the host level, swit kubectl exec -it catalog-0 -n shns /bin/bash ``` -Now, you can troubleshooting the corresponding component using the alert log or the trace files etc just like a normal Sharding Database Deployment. 
Please refer to [Oracle Database Sharding Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/19/shard/sharding-troubleshooting.html#GUID-629262E5-7910-4690-A726-A565C59BA73E) for this purpose. +Now, you can troubleshoot the corresponding component using the alert log or the trace files etc just like a normal Oracle Globally Distributed Database Deployment. Please refer to [Oracle Globally Distributed Database Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/19/shard/sharding-troubleshooting.html#GUID-629262E5-7910-4690-A726-A565C59BA73E) for this purpose. ## Debugging using Database Events -* You can enable database events as part of the Sharded Database Deployment +* You can enable database events as part of the Oracle Globally Distributed Database Deployment * This can be enabled using the `envVars` * One example of enabling Database Events is [sharding_provisioning_with_db_events.md](./debugging/sharding_provisioning_with_db_events.md) \ No newline at end of file diff --git a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md index fa73920f..763f2dc8 100644 --- a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md +++ b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md @@ -1,17 +1,17 @@ -# Example of provisioning Oracle Sharded Database along with DB Events set at Database Level +# Example of provisioning Oracle Globally Distributed Database along with DB Events set at Database Level **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. This example sets a Database Event at the Database Level for Catalog and Shard Databases.
-The sharded database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. +The Oracle Globally Distributed Database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Globally Distributed Database topology with System-Managed Sharding is deployed using Oracle Sharding controller. **NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. -This example uses `sharding_provisioning_with_db_events.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +This example uses `sharding_provisioning_with_db_events.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Event: `10798 trace name context forever, level 7` set along with `GWM_TRACE level 263` diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md index 61641312..f6b53462 100644 --- a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md +++ b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md @@ -1,25 +1,25 @@ -# Example of provisioning Oracle Sharded 
Database with Oracle 23ai FREE Database and GSM Images +# Example of provisioning Oracle Globally Distributed Database with Oracle 23ai FREE Database and GSM Images **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. This example uses the Oracle 23ai FREE Database and GSM Images. -The sharded database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. +The Oracle Globally Distributed Database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. **NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. This example uses `sharding_provisioning_with_free_images.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` To get the Oracle 23ai FREE Database and GSM Images: * The Oracle 23ai FREE RDBMS Image used is `container-registry.oracle.com/database/free:latest`. 
Check [Oracle Database Free Get Started](https://www.oracle.com/database/free/get-started/?source=v0-DBFree-ChatCTA-j2032-20240709) for details. + * The Oracle 23ai FREE GSM Image used is `container-registry.oracle.com/database/gsm:latest`. * To pull the above image from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. - * Use the Oracle 23ai FREE GSM Binaries `LINUX.X64_234000_gsm.zip` as listed on page [Oracle Database Free Get Started](https://www.oracle.com/database/free/get-started/?source=v0-DBFree-ChatCTA-j2032-20240709) and prepare the GSM Container Image following [Oracle Global Data Services Image](https://github.com/oracle/db-sharding/tree/master/docker-based-sharding-deployment/dockerfiles) * You need to change `dbImage` and `gsmImage` tag with the images you want to use in your enviornment in file `sharding_provisioning_with_free_images.yaml`. diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml index 7e39b3b2..53e4191f 100644 --- a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml +++ b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml @@ -41,8 +41,8 @@ spec: storageClass: oci dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred - gsmImage: - gsmImagePullSecret: + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred dbEdition: "free" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md b/docs/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md index 7d3312a6..0a453c15 100644 --- a/docs/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md +++ 
b/docs/sharding/provisioning/provisioning_persistent_volume_having_db_gold_image.md @@ -2,7 +2,7 @@ In this use case, a Persistent Volume with a Oracle Database Gold Image is created. - This is required when you do not already have a Persistent Volume with a Database Gold Image from which you can clone database to save time while deploying Oracle Sharding topology using Oracle Sharding controller. + This is required when you do not already have a Persistent Volume with a Database Gold Image from which you can clone database to save time while deploying Oracle Globally Distributed Database topology using Oracle Sharding controller. This example uses file `oraclesi.yaml` to provision a single instance Oracle Database: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index ba72be25..2698bce9 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -1,14 +1,14 @@ -# Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs) +# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs) **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
-In this test case, you provision the System managed Sharding Topology with Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. +In this test case, you provision the Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. -Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup across ADs. +Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup across ADs. NOTE: @@ -21,10 +21,10 @@ NOTE: ```sh kubectl get pv -n shns ``` -2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `snr_ssharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `snr_ssharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding Database controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. 
@@ -33,12 +33,12 @@ NOTE: NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone_across_ads.yaml`. - * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. + * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) Use the file: [snr_ssharding_shard_prov_clone_across_ads.yaml](./snr_ssharding_shard_prov_clone_across_ads.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index cf4240f7..b43a9158 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -1,14 +1,14 @@ -# Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD) +# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD) **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this test case, you provision the System managed Sharding Topology with Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. 
+In this test case, you provision the Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. -Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup. +Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup. **NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. @@ -18,10 +18,10 @@ Choosing this option takes substantially less time during the Oracle Database Sh kubectl get pv -n shns ``` -2. This example uses `snr_ssharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +2. This example uses `snr_ssharding_shard_prov_clone.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` @@ -29,14 +29,14 @@ Choosing this option takes substantially less time during the Oracle Database Sh NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. 
The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. - Use the file: [snr_ssharding_shard_prov_clone.yaml](./snr_ssharding_shard_prov_clone.yaml) for this use case as below: 1. 
Deploy the `snr_ssharding_shard_prov_clone.yaml` file: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md index 44972090..d6171986 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md @@ -1,19 +1,19 @@ -# Provisioning System-Managed Sharding Topology with Raft replication enabled with number of chunks specified +# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled with number of chunks specified **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed with RAFT Replication enabled is deployed using Oracle Sharding controller. +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Globally Distributed Database topology with System-Managed sharding and RAFT Replication enabled is deployed using Oracle Sharding controller. **NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
-By default, the System-Managed with RAFT Replication deploys the Sharded Database with 360 chunks per Shard Database (because there are 3 chunks created for each replication unit). In this example, the Sharded Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. +By default, the System-Managed with RAFT Replication deploys the Oracle Globally Distributed Database with 360 chunks per Shard Database (because there are 3 chunks created for each replication unit). In this example, the Oracle Globally Distributed Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. -This example uses `snr_ssharding_shard_prov_chunks.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +This example uses `snr_ssharding_shard_prov_chunks.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Total number of chunks as `120` specified by variable `CATALOG_CHUNKS` (it will be 120 chunks per shard) * Namespace: `shns` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md index 9cfd6afb..c432310d 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md @@ -1,15 +1,15 @@ -# Provisioning System-Managed Sharding Topology with Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods +# Provisioning Oracle Globally Distributed Database 
Topology with System-managed sharding and Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with System-Managed with RAFT Replication is deployed using Oracle Sharding controller. +In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Globally Distributed Database Topology with System-managed sharding and RAFT Replication is deployed using Oracle Sharding controller. This example uses `snr_ssharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Tags `memory` and `cpu` to control the Memory and CPU of the PODs diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md index d4cb11de..d45b2911 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md @@ -1,15 +1,15 @@ -# 
Provisioning System managed Sharding Topology with Raft replication enabled and send Notification using OCI Notification Service +# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled and send Notification using OCI Notification Service **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. +This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Globally Distributed Database Topology provisioned using the Oracle Database sharding controller. -This example uses `snr_ssharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +This example uses `snr_ssharding_shard_prov_send_notification.yaml` to provision an Oracle Globally Distributed Database Topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier. 
@@ -64,14 +64,14 @@ To do this: kubectl describe secret my-secret -n shns ``` +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_send_notification.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. - Use the file: [snr_ssharding_shard_prov_send_notification.yaml](./snr_ssharding_shard_prov_send_notification.yaml) for this use case as below: 1. 
Deploy the `snr_ssharding_shard_prov_send_notification.yaml` file: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md index 892741a5..c8568f1e 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md @@ -1,17 +1,17 @@ -# Provisioning System-Managed Sharding Topology with Raft replication enabled without Database Gold Image +# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled without Database Gold Image **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed with RAFT Replication enabled is deployed using Oracle Sharding controller. +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Globally Distributed Database topology with System-Managed sharding and RAFT Replication enabled is deployed using Oracle Sharding controller. **NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
-This example uses `snr_ssharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +This example uses `snr_ssharding_shard_prov.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * `RAFT Replication` enabled diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md index fe3157ec..dc026a7c 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md @@ -1,4 +1,4 @@ -# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT reolication enabled +# Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding and RAFT replication enabled **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** @@ -8,10 +8,10 @@ This use case demonstrates how to delete an existing Shard from an existing Orac **NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard. 
-In this use case, the existing database Sharding is having: +In this use case, the existing Oracle Globally Distributed Database is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` +* Five Shard Database Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` * One Catalog Pod: `catalog` * Namespace: `shns` * `RAFT Replication` enabled diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md index 03423e72..962bf64c 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md @@ -1,15 +1,15 @@ -# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled +# Scale Out - Add Shards to an existing Oracle Globally Distributed Database Topology provisioned earlier with System-Managed Sharding and RAFT replication enabled **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with System-Managed with RAFT Replication enabled provisioned earlier using Oracle Database Sharding controller. +This use case demonstrates adding a new shard to an existing Oracle Globally Distributed Database topology with System-Managed sharding with RAFT Replication enabled provisioned earlier using Oracle Database Sharding controller. 
-In this use case, the existing Oracle Database sharding topology is having: +In this use case, the existing Oracle Globally Distributed Database topology is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * `RAFT Replication` enabled @@ -18,7 +18,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) - * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * If the existing Oracle Globally Distributed Database Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. 
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index 64e2f4eb..e015f916 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) +# Provisioning Oracle Globally Distributed Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -6,7 +6,7 @@ In this test case, you provision the Oracle Database sharding topology with Syst This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. -Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup across ADs. +Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup across ADs. NOTE: @@ -22,7 +22,7 @@ NOTE: 2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. 
This example uses `ssharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. @@ -31,11 +31,11 @@ NOTE: NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. - * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_clone_across_ads.yaml`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `ssharding_shard_prov_clone_across_ads.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index f7aef949..fb16e3cd 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) +# Provisioning Oracle Globally Distributed Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -6,7 +6,7 @@ In this case, the database is created automatically by cloning from an existing This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. -Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup. +Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup. **NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. 
@@ -19,12 +19,12 @@ Choosing this option takes substantially less time during the Oracle Database Sh 2. This example uses `ssharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. 
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md index 0c6ea8fe..b824ab03 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Sharded Database with System-Managed Sharding with number of chunks specified +# Provisioning Oracle Globally Distributed Database with System-Managed Sharding with number of chunks specified **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -6,12 +6,12 @@ In this use case, the database is created automatically using DBCA during the pr **NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. -By default, the System-Managed Sharding deploys the Sharded Database with 120 chunks per Shard Database. If, for example, we have three shards in the Sharded Database, it will be total of 360 chunks. In this example, the Sharded Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. +By default, the System-Managed Sharding deploys the Oracle Globally Distributed Database with 120 chunks per Shard Database. For example, if we have three shards in the Oracle Globally Distributed Database, it will be a total of 360 chunks. In this example, the Oracle Globally Distributed Database will be deployed with a non-default number of chunks specified using parameter `CATALOG_CHUNKS`. 
This example uses `ssharding_shard_prov_chunks.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Total number of chunks as `120` specified by variable `CATALOG_CHUNKS` (it will be 40 chunks per shard) * Namespace: `shns` diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md index c4f45a48..153a40a9 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Sharded Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods +# Provisioning Oracle Globally Distributed Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
@@ -7,7 +7,7 @@ In this use case, there are additional tags used to control resources such as CP This example uses `ssharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Tags `memory` and `cpu` to control the Memory and CPU of the PODs diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md index 1a6a1ee3..7ec24439 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md @@ -1,13 +1,13 @@ -# Provisioning Oracle Sharded Database with System-Managed Sharding and send Notification using OCI Notification Service +# Provisioning Oracle Globally Distributed Database with System-Managed Sharding and send Notification using OCI Notification Service **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. 
+This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Globally Distributed Database topology provisioned using the Oracle Database sharding controller. This example uses `ssharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier. @@ -67,7 +67,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
Use the file: [ssharding_shard_prov_send_notification.yaml](./ssharding_shard_prov_send_notification.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md index b223d1af..b262407f 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Sharded Database with System-Managed Sharding without Database Gold Image +# Provisioning Oracle Globally Distributed Database with System-Managed Sharding without Database Gold Image **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
@@ -9,7 +9,7 @@ In this use case, the database is created automatically using DBCA during the pr This example uses `ssharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md index bca34253..8cac01cd 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md @@ -1,4 +1,4 @@ -# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding +# Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -6,10 +6,10 @@ This use case demonstrates how to delete an existing Shard from an existing Orac **NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard. 
-In this use case, the existing database Sharding is having: +In this use case, the existing Oracle Globally Distributed Database has the following: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` +* Five Shard Database Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` * One Catalog Pod: `catalog` * Namespace: `shns` @@ -21,7 +21,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services NOTE: Use tag `isDelete: enable` to delete the shard you want. -This use case deletes the shard `shard4` from the above Sharding Topology. +This use case deletes the shard `shard4` from the above Oracle Globally Distributed Database Topology. Use the file: [ssharding_shard_prov_delshard.yaml](./ssharding_shard_prov_delshard.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md index 1db8e6c3..091a01a0 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md @@ -1,13 +1,13 @@ -# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding +# Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with System-Managed Sharding provisioned earlier using Oracle Database Sharding controller. 
-In this use case, the existing Oracle Database sharding topology is having: +In this use case, the existing Oracle Globally Distributed Database topology has the following: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` @@ -15,9 +15,9 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) - * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * If the existing Oracle Globally Distributed Database Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. -This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. +This use case adds two new shards `shard4`,`shard5` to the above Oracle Globally Distributed Database Topology. 
Use the file: [ssharding_shard_prov_extshard.yaml](./ssharding_shard_prov_extshard.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index 9b2905e8..dda8a350 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -1,12 +1,12 @@ -# Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) +# Provisioning Oracle Globally Distributed Database with User-Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this test case, you provision the Oracle Database sharding topology with User Defined Sharding while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. +In this test case, you provision the Oracle Globally Distributed Database topology with User-Defined Sharding while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. 
-Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup across ADs. +Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup across ADs. NOTE: @@ -19,15 +19,15 @@ NOTE: ```sh kubectl get pv -n shns ``` -2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `udsharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `udsharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. * OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` -* User Defined Sharding is specified using `shardingType: USER` +* User-Defined Sharding is specified using `shardingType: USER` NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. @@ -37,7 +37,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. Use the file: [udsharding_shard_prov_clone_across_ads.yaml](./udsharding_shard_prov_clone_across_ads.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index a4669667..34fa2867 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) +# Provisioning Oracle Globally Distributed Database with User-Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -6,7 +6,7 @@ In this case, the database is created automatically by cloning from an existing This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). 
The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. -Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup. +Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup. **NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. @@ -16,14 +16,14 @@ Choosing this option takes substantially less time during the Oracle Database Sh kubectl get pv -n shns ``` -2. This example uses `udsharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +2. This example uses `udsharding_shard_prov_clone.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` -* User Defined Sharding is specified using `shardingType: USER` +* User-Defined Sharding is specified using `shardingType: USER` NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. @@ -33,7 +33,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. Use the file: [udsharding_shard_prov_clone.yaml](./udsharding_shard_prov_clone.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md index b52b8745..8836baeb 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md @@ -1,18 +1,18 @@ -# Provisioning Oracle Sharded Database with User Defined Sharding with additional control on resources like Memory and CPU allocated to Pods +# Provisioning Oracle Globally Distributed Database with User-Defined Sharding with additional control on resources like Memory and CPU allocated to Pods **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with User Defined Sharding is deployed using Oracle Sharding controller. 
+In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Globally Distributed Database topology with User-Defined Sharding is deployed using Oracle Sharding controller. This example uses `udsharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Tags `memory` and `cpu` to control the Memory and CPU of the PODs * Additional tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level -* User Defined Sharding is specified using `shardingType: USER` +* User-Defined Sharding is specified using `shardingType: USER` In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md index 640301a2..ea3a2802 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md @@ -1,19 +1,19 @@ -# Provisioning Oracle Sharded Database with User Defined Sharding and send Notification using OCI Notification Service +# Provisioning Oracle Globally Distributed Database with User-Defined Sharding and send Notification using OCI Notification Service **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. +This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Globally Distributed Database topology provisioned using the Oracle Database sharding controller. 
-This example uses `udsharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +This example uses `udsharding_shard_prov_send_notification.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier. * OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` * Configmap to send notification email when a particular operation is completed. For example: When a shard is added. -* User Defined Sharding is specified using `shardingType: USER` +* User-Defined Sharding is specified using `shardingType: USER` **NOTE:** @@ -68,7 +68,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
Use the file: [udsharding_shard_prov_send_notification.yaml](./udsharding_shard_prov_send_notification.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md index 2be5ac9f..5b1d2db0 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md @@ -1,18 +1,18 @@ -# Provisioning Oracle Sharded Database with User Defined Sharding without Database Gold Image +# Provisioning Oracle Globally Distributed Database with User-Defined Sharding without Database Gold Image **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with User Defined Sharding is deployed using Oracle Sharding controller. +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with User-Defined Sharding is deployed using Oracle Sharding controller. **NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
-This example uses `udsharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: +This example uses `udsharding_shard_prov.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` -* User Defined Sharding is specified using `shardingType: USER` +* User-Defined Sharding is specified using `shardingType: USER` In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md index 2c4cbfc2..adb8af30 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md @@ -1,16 +1,16 @@ -# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with User Defined Sharding +# Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with User-Defined Sharding **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology with User Defined Sharding provisioned using Oracle Database Sharding controller. 
+This use case demonstrates how to delete an existing Shard from an existing Oracle Globally Distributed Database topology with User-Defined Sharding provisioned using Oracle Database Sharding controller. In this use case, the existing database Sharding is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` +* Five Shard Database Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` * One Catalog Pod: `catalog` * Namespace: `shns` -* User Defined Sharding is specified using `shardingType: USER` +* User-Defined Sharding is specified using `shardingType: USER` In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. @@ -20,7 +20,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services **NOTE:** Use tag `isDelete: enable` to delete the shard you want. -This use case deletes the shard `shard4` from the above Sharding Topology. +This use case deletes the shard `shard4` from the above Oracle Globally Distributed Database Topology. 
Use the file: [udsharding_shard_prov_delshard.yaml](./udsharding_shard_prov_delshard.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md index 20f50b29..371b2438 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md @@ -1,24 +1,24 @@ -# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with User Defined Sharding +# Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with User-Defined Sharding **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with User Defined Sharding provisioned earlier using Oracle Database Sharding controller. +This use case demonstrates adding a new shard to an existing Oracle Globally Distributed Database topology with User-Defined Sharding provisioned earlier using Oracle Database Sharding controller. 
-In this use case, the existing Oracle Database sharding topology is having: +In this use case, the existing Oracle Globally Distributed Database topology is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three sharding Pods: `shard1`, `shard2` and `shard3` +* Three Shard Database Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` -* User Defined Sharding is specified using `shardingType: USER` +* User-Defined Sharding is specified using `shardingType: USER` In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) - * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * If the existing Oracle Globally Distributed Database Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. -This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. 
+This use case adds two new shards `shard4`,`shard5` to above Oracle Globally Distributed Database Topology. Use the file: [udsharding_shard_prov_extshard.yaml](./udsharding_shard_prov_extshard.yaml) for this use case as below: From 93099928ffcc8763998264727d864908088d74f5 Mon Sep 17 00:00:00 2001 From: jpverma85 Date: Wed, 11 Sep 2024 17:29:36 -0400 Subject: [PATCH 12/24] Known issues change (#147) * added known issues for sharding * added the known issue change --- docs/sharding/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/sharding/README.md b/docs/sharding/README.md index a5c7c470..076d7e32 100644 --- a/docs/sharding/README.md +++ b/docs/sharding/README.md @@ -33,6 +33,7 @@ Following sections provide the details for deploying Oracle Globally Distributed * [Provisioning Oracle Globally Distributed Database System-Managed Sharding with Raft replication enabled in a Cloud-Based Kubernetes Cluster](#provisioning-oracle-globally-distributed-database-topology-with-system-managed-sharding-and-raft-replication-enabled-in-a-cloud-based-kubernetes-cluster) * [Connecting to Shard Databases](#connecting-to-shard-databases) * [Debugging and Troubleshooting](#debugging-and-troubleshooting) +* [Known Issues](#known-issues) **Note** Before proceeding to the next section, you must complete the instructions given in each section, based on your enviornment, before proceeding to next section. 
@@ -187,3 +188,10 @@ After the Oracle Globally Distributed Database Topology has been provisioned usi ## Debugging and Troubleshooting To debug the Oracle Globally Distributed Database Topology provisioned using the Sharding Controller of Oracle Database Kubernetes Operator, follow this document: [Debugging and troubleshooting](./provisioning/debugging.md) + +## Known Issues + +* For both ENTERPRISE and FREE Images, if the GSM POD is stopped using `crictl stopp` at the worker node level, it leaves GSM in failed state with the `gdsctl` commands failing with error **GSM-45034: Connection to GDS catalog is not established**. It is beacause with change, the network namespace is lost if we check from the GSM Pod. +* For both ENTERPRISE and FREE Images, reboot of node running CATALOG using `/sbin/reboot -f` results in **GSM-45076: GSM IS NOT RUNNING**. Once you hit this issue, after waiting for a certain time, the `gdsctl` commands start working as the DB connection start working. Once the stack comes up fine after the node reboot, after some time, unexpected restart of GSM Pod is also observed. +* For both ENTERPRISE and FREE Images, reboot of node running the SHARD Pod using `/sbin/reboot -f` or stopping the Shard Database Pod from worker node using `crictl stopp` command leaves the shard in error state. +* For both ENTERPRISE and FREE Images, GSM pod restarts multiple times after force rebooting the node running GSM Pod. Its because when the worker node comes up, the GSM pod was recreated but it does not get DB connection to Catalog and meanwhile, the Liveness Probe fails which restart the Pod. 
\ No newline at end of file From d6ac2321e8a15a354a5e4958c281cd3be14afdc3 Mon Sep 17 00:00:00 2001 From: jpverma85 Date: Fri, 11 Oct 2024 14:53:06 -0400 Subject: [PATCH 13/24] doc_change (#151) --- docs/sharding/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sharding/README.md b/docs/sharding/README.md index 076d7e32..134d338b 100644 --- a/docs/sharding/README.md +++ b/docs/sharding/README.md @@ -191,7 +191,7 @@ To debug the Oracle Globally Distributed Database Topology provisioned using the ## Known Issues -* For both ENTERPRISE and FREE Images, if the GSM POD is stopped using `crictl stopp` at the worker node level, it leaves GSM in failed state with the `gdsctl` commands failing with error **GSM-45034: Connection to GDS catalog is not established**. It is beacause with change, the network namespace is lost if we check from the GSM Pod. -* For both ENTERPRISE and FREE Images, reboot of node running CATALOG using `/sbin/reboot -f` results in **GSM-45076: GSM IS NOT RUNNING**. Once you hit this issue, after waiting for a certain time, the `gdsctl` commands start working as the DB connection start working. Once the stack comes up fine after the node reboot, after some time, unexpected restart of GSM Pod is also observed. -* For both ENTERPRISE and FREE Images, reboot of node running the SHARD Pod using `/sbin/reboot -f` or stopping the Shard Database Pod from worker node using `crictl stopp` command leaves the shard in error state. -* For both ENTERPRISE and FREE Images, GSM pod restarts multiple times after force rebooting the node running GSM Pod. Its because when the worker node comes up, the GSM pod was recreated but it does not get DB connection to Catalog and meanwhile, the Liveness Probe fails which restart the Pod. \ No newline at end of file +* For both ENTERPRISE and FREE Images, if the Oracle Global Service Manager (GSM) POD is stopped using `crictl stopp` at the worker node level, it leaves GSM in failed state. 
The `gdsctl` commands fail with error **GSM-45034: Connection to GDS catalog is not established**. This is because with change, the network namespace is lost when checked from the GSM Pod. +* For both ENTERPRISE and FREE Images, restart of the node running CATALOG using `/sbin/reboot -f` results in **GSM-45076: GSM IS NOT RUNNING**. After you encounter this issue, wait until the `gdsctl` commands start working as the database connection start working. When the stack comes up again after the node restart, you can encounter an unexpected restart of the GSM Pod. +* For both ENTERPRISE and FREE Images, either restart of node running the SHARD Pod using `/sbin/reboot -f` or stopping the Shard Database Pod from the worker node using `crictl stopp` command can leave the shard in an error state. +* For both ENTERPRISE and FREE Images, after force restarts of the node running GSM Pod, the GSM pod restarts multiple times, and then becomes stable. The GSM pod restarts itself because when the worker node comes up, the GSM pod is recreated, but does not obtain DB connection to the Catalog. The Liveness Probe fails which restarts the Pod. Be aware of this issue, and permit the GSM pod to become stable. \ No newline at end of file From 44f2afe186da2cd2295aa689f15d46a3f8b176fc Mon Sep 17 00:00:00 2001 From: jpverma85 Date: Thu, 17 Oct 2024 18:40:18 -0400 Subject: [PATCH 14/24] added known issues (#152) --- docs/sharding/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/sharding/README.md b/docs/sharding/README.md index 134d338b..239dd767 100644 --- a/docs/sharding/README.md +++ b/docs/sharding/README.md @@ -193,5 +193,7 @@ To debug the Oracle Globally Distributed Database Topology provisioned using the * For both ENTERPRISE and FREE Images, if the Oracle Global Service Manager (GSM) POD is stopped using `crictl stopp` at the worker node level, it leaves GSM in failed state. 
The `gdsctl` commands fail with error **GSM-45034: Connection to GDS catalog is not established**. This is because with change, the network namespace is lost when checked from the GSM Pod. * For both ENTERPRISE and FREE Images, restart of the node running CATALOG using `/sbin/reboot -f` results in **GSM-45076: GSM IS NOT RUNNING**. After you encounter this issue, wait until the `gdsctl` commands start working as the database connection start working. When the stack comes up again after the node restart, you can encounter an unexpected restart of the GSM Pod. +* For both ENTERPRISE and FREE Images, if the CATALOG Database Pod is stopped from the worker node using the command `crictl stopp`, then it can leave the CATALOG in an error state. This error state results in GSM reporting the error message **GSM-45034: Connection to GDS catalog is not established.** * For both ENTERPRISE and FREE Images, either restart of node running the SHARD Pod using `/sbin/reboot -f` or stopping the Shard Database Pod from the worker node using `crictl stopp` command can leave the shard in an error state. -* For both ENTERPRISE and FREE Images, after force restarts of the node running GSM Pod, the GSM pod restarts multiple times, and then becomes stable. The GSM pod restarts itself because when the worker node comes up, the GSM pod is recreated, but does not obtain DB connection to the Catalog. The Liveness Probe fails which restarts the Pod. Be aware of this issue, and permit the GSM pod to become stable. \ No newline at end of file +* For both ENTERPRISE and FREE Images, after force restarts of the node running GSM Pod, the GSM pod restarts multiple times, and then becomes stable. The GSM pod restarts itself because when the worker node comes up, the GSM pod is recreated, but does not obtain DB connection to the Catalog. The Liveness Probe fails which restarts the Pod. Be aware of this issue, and permit the GSM pod to become stable. 
+* **DDL Propagation from Catalog to Shards:** DDL Propagation from the Catalog Database to the Shard Databases can take several minutes to complete. To see faster propagation of DDLs such as the tablespace set from the Catalog Database to the Shard Databases, Oracle recommends that you set smaller chunk values by using the `CATALOG_CHUNKS` attribute in the .yaml file while creating the Sharded Database Topology. \ No newline at end of file From a834cfb518f3db3455628080dcb5fc1ef05969ee Mon Sep 17 00:00:00 2001 From: jpverma85 Date: Mon, 21 Oct 2024 16:33:52 -0400 Subject: [PATCH 15/24] Uds doc change (#153) * added known issues * uds doc change --- .../udsharding_scale_in_delete_an_existing_shard.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md index adb8af30..e01e606f 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md @@ -26,7 +26,7 @@ Use the file: [udsharding_shard_prov_delshard.yaml](./udsharding_shard_prov_dels 1. Move out the chunks from the shard to be deleted to another shard. For example, in the current case, before deleting the `shard4`, if you want to move the chunks from `shard4` to `shard2`, then you can run the below `kubectl` command where `/u01/app/oracle/product/23ai/gsmhome_1` is the GSM HOME: ```sh - kubectl exec -it pod/gsm1-0 -n shns -- /u01/app/oracle/product/23ai/gsmhome_1/bin/gdsctl "move chunk -chunk all -source shard4_shard4pdb -target shard4_shard4pdb" + kubectl exec -it pod/gsm1-0 -n shns -- /u01/app/oracle/product/23ai/gsmhome_1/bin/gdsctl "move chunk -chunk all -source shard4_shard4pdb -target shard2_shard2pdb" ``` 2. 
Confirm the shard to be deleted (`shard4` in this case) is not having any chunk using below command: ```sh @@ -48,7 +48,7 @@ Use the file: [udsharding_shard_prov_delshard.yaml](./udsharding_shard_prov_dels - After you apply `udsharding_shard_prov_delshard.yaml`, the change may not be visible immediately and it may take some time for the delete operation to complete. - If the shard, that you are trying to delete, is still having chunks, then the you will see message like below in the logs of the Oracle Database Operator Pod. ```sh - INFO controllers.database.ShardingDatabase manual intervention required + DEBUG events Shard Deletion failed for [shard4]. Retry shard deletion after manually moving the chunks. Requeuing ``` In this case, you will need to first move out the chunks from the shard to be deleted using Step 2 above and then apply the file in Step 3 to delete that shard. From b2cd9b9a9f62318f13a24cc69027a2796de4b7ec Mon Sep 17 00:00:00 2001 From: Paramdeep Saini Date: Mon, 10 Mar 2025 15:52:13 +0000 Subject: [PATCH 16/24] Added Features of Oracle DB Operator Release 1.2.0 --- .gitignore | 2 +- Dockerfile | 18 +- Makefile | 113 +- PREREQUISITES.md | 8 +- PROJECT | 102 + README.md | 136 +- THIRD_PARTY_LICENSES.txt | 65 +- .../v1alpha1/adbfamily_common_spec.go | 67 + .../autonomouscontainerdatabase_types.go | 25 +- .../autonomouscontainerdatabase_webhook.go | 9 +- .../v1alpha1/autonomousdatabase_conversion.go | 371 + .../v1alpha1/autonomousdatabase_types.go | 333 +- .../v1alpha1/autonomousdatabase_webhook.go | 172 +- .../autonomousdatabase_webhook_test.go | 150 +- .../autonomousdatabasebackup_types.go | 15 +- .../autonomousdatabasebackup_webhook.go | 21 +- .../autonomousdatabasebackup_webhook_test.go | 18 +- .../autonomousdatabaserestore_types.go | 19 +- .../autonomousdatabaserestore_webhook.go | 16 +- .../autonomousdatabaserestore_webhook_test.go | 12 +- .../v1alpha1/dataguardbroker_conversion.go | 14 + .../v1alpha1/dataguardbroker_types.go | 73 +- 
.../v1alpha1/dataguardbroker_webhook.go | 10 + .../v1alpha1/dbcssystem_conversion.go | 14 + .../database/v1alpha1/dbcssystem_kms_types.go | 141 + .../v1alpha1/dbcssystem_pdbconfig_types.go | 83 + apis/database/v1alpha1/dbcssystem_types.go | 98 +- apis/database/v1alpha1/dbcssystem_webhook.go | 98 + .../oraclerestdataservice_conversion.go | 14 + .../v1alpha1/oraclerestdataservice_types.go | 35 +- .../v1alpha1/oraclerestdataservice_webhook.go | 3 - .../v1alpha1/shardingdatabase_conversion.go | 14 + .../v1alpha1/shardingdatabase_types.go | 25 +- .../v1alpha1/shardingdatabase_webhook.go | 48 +- .../singleinstancedatabase_conversion.go | 14 + .../v1alpha1/singleinstancedatabase_types.go | 16 +- .../singleinstancedatabase_webhook.go | 20 +- .../v1alpha1/zz_generated.deepcopy.go | 958 +- apis/database/v4/adbfamily_common_spec.go | 67 + .../adbfamily_utils.go} | 55 +- .../v4/autonomouscontainerdatabase_types.go | 226 + .../v4/autonomouscontainerdatabase_webhook.go | 110 + apis/database/v4/autonomousdatabase_types.go | 393 + .../database/v4/autonomousdatabase_webhook.go | 170 + .../v4/autonomousdatabasebackup_types.go | 129 + .../v4/autonomousdatabasebackup_webhook.go | 158 + .../v4/autonomousdatabaserestore_types.go | 142 + .../v4/autonomousdatabaserestore_webhook.go | 146 + apis/database/{v1alpha1 => v4}/cdb_types.go | 20 +- apis/database/{v1alpha1 => v4}/cdb_webhook.go | 11 +- .../database/v4/dataguardbroker_conversion.go | 4 + apis/database/v4/dataguardbroker_types.go | 163 + apis/database/v4/dataguardbroker_webhook.go | 55 + apis/database/v4/dbcssystem_conversion.go | 4 + apis/database/v4/dbcssystem_kms_types.go | 141 + .../database/v4/dbcssystem_pdbconfig_types.go | 83 + apis/database/v4/dbcssystem_types.go | 292 + apis/database/v4/dbcssystem_webhook.go | 98 + apis/database/v4/groupversion_info.go | 58 + apis/database/v4/lrest_types.go | 191 + apis/database/v4/lrest_webhook.go | 219 + apis/database/v4/lrpdb_types.go | 256 + apis/database/v4/lrpdb_webhook.go | 370 + 
.../v4/oraclerestdataservice_conversion.go | 4 + .../v4/oraclerestdataservice_types.go | 158 + .../v4/oraclerestdataservice_webhook.go | 55 + apis/database/v4/ordssrvs_types.go | 693 + apis/database/{v1alpha1 => v4}/pdb_types.go | 17 +- apis/database/{v1alpha1 => v4}/pdb_webhook.go | 40 +- .../v4/shardingdatabase_conversion.go | 4 + apis/database/v4/shardingdatabase_types.go | 427 + apis/database/v4/shardingdatabase_webhook.go | 314 + .../v4/singleinstancedatabase_conversion.go | 4 + .../v4/singleinstancedatabase_types.go | 231 + .../v4/singleinstancedatabase_webhook.go | 55 + apis/database/v4/zz_generated.deepcopy.go | 4213 +++++ .../v1/databaseobserver_types.go | 195 + .../v1/databaseobserver_webhook.go | 185 + apis/observability/v1/groupversion_info.go | 58 + .../observability/v1/zz_generated.deepcopy.go | 481 + .../v1alpha1/databaseobserver_types.go | 83 +- .../v1alpha1/databaseobserver_webhook.go | 8 +- .../v1alpha1/zz_generated.deepcopy.go | 201 +- .../v4/databaseobserver_types.go | 196 + .../v4/databaseobserver_webhook.go | 182 + apis/observability/v4/groupversion_info.go | 58 + .../observability/v4/zz_generated.deepcopy.go | 481 + commons/adb_family/utils.go | 18 +- commons/database/constants.go | 34 +- commons/database/podbuilder.go | 108 + commons/database/svcbuilder.go | 99 + commons/database/utils.go | 26 - commons/dbcssystem/dbcs_reconciler.go | 974 +- commons/dbcssystem/dcommon.go | 61 +- commons/k8s/create.go | 27 +- commons/k8s/fetch.go | 30 +- commons/multitenant/lrest/common.go | 113 + commons/observability/constants.go | 68 +- commons/observability/utils.go | 503 +- commons/oci/containerdatabase.go | 12 +- commons/oci/database.go | 331 +- commons/oci/provider.go | 8 +- commons/sharding/catalog.go | 58 +- commons/sharding/exec.go | 40 +- commons/sharding/gsm.go | 64 +- commons/sharding/provstatus.go | 227 +- commons/sharding/scommon.go | 160 +- commons/sharding/shard.go | 50 +- ...acle.com_autonomouscontainerdatabases.yaml | 100 +- 
....oracle.com_autonomousdatabasebackups.yaml | 121 +- ...oracle.com_autonomousdatabaserestores.yaml | 121 +- ...tabase.oracle.com_autonomousdatabases.yaml | 597 +- .../crd/bases/database.oracle.com_cdbs.yaml | 92 +- .../database.oracle.com_dataguardbrokers.yaml | 140 +- .../database.oracle.com_dbcssystems.yaml | 562 +- .../crd/bases/database.oracle.com_lrests.yaml | 254 + .../crd/bases/database.oracle.com_lrpdbs.yaml | 369 + ...ase.oracle.com_oraclerestdataservices.yaml | 214 +- .../bases/database.oracle.com_ordssrvs.yaml | 488 + .../crd/bases/database.oracle.com_pdbs.yaml | 130 +- ...database.oracle.com_shardingdatabases.yaml | 701 +- ...se.oracle.com_singleinstancedatabases.yaml | 427 +- ...vability.oracle.com_databaseobservers.yaml | 6919 +++++++- config/crd/kustomization.yaml | 25 +- ...ction_in_autonomouscontainerdatabases.yaml | 2 +- ...njection_in_autonomousdatabasebackups.yaml | 2 +- ...jection_in_autonomousdatabaserestores.yaml | 2 +- ...njection_in_database_dataguardbrokers.yaml | 8 + .../cainjection_in_database_lrests.yaml | 8 + .../cainjection_in_database_lrpdbs.yaml | 8 + ...on_in_database_oraclerestdataservices.yaml | 8 + .../cainjection_in_database_ordssrvs.yaml | 8 + ...n_in_database_singleinstancedatabases.yaml | 8 + .../patches/cainjection_in_dbcssystems.yaml | 2 +- ...on_in_observability_databaseobservers.yaml | 8 + ...bhook_in_autonomouscontainerdatabases.yaml | 20 +- .../webhook_in_autonomousdatabasebackups.yaml | 20 +- ...webhook_in_autonomousdatabaserestores.yaml | 20 +- .../webhook_in_autonomousdatabases.yaml | 18 +- config/crd/patches/webhook_in_lrests.yaml | 17 + config/crd/patches/webhook_in_lrpdbs.yaml | 17 + config/crd/patches/webhook_in_ordssrvs.yaml | 17 + config/database.oracle.com_DbcsSystem.yaml | 209 +- ...database.oracle.com_shardingdatabases.yaml | 4 +- config/manager/kustomization.yaml | 2 +- ...tabase-operator.clusterserviceversion.yaml | 4 +- ...vability.oracle.com_databaseobservers.yaml | 9350 ++++++++++- 
config/rbac/lrest_editor_role.yaml | 24 + config/rbac/lrest_viewer_role.yaml | 20 + config/rbac/lrpdb_editor_role.yaml | 24 + config/rbac/lrpdb_viewer_role.yaml | 20 + config/rbac/ordssrvs_editor_role.yaml | 24 + config/rbac/ordssrvs_viewer_role.yaml | 20 + config/rbac/role.yaml | 398 +- .../samples/adb/autonomousdatabase_bind.yaml | 3 +- .../samples/adb/autonomousdatabase_clone.yaml | 35 + .../adb/autonomousdatabase_create.yaml | 3 +- .../autonomousdatabase_delete_resource.yaml | 2 +- .../adb/autonomousdatabase_rename.yaml | 3 +- .../samples/adb/autonomousdatabase_scale.yaml | 3 +- ...tonomousdatabase_stop_start_terminate.yaml | 6 +- ...onomousdatabase_update_admin_password.yaml | 5 +- .../adb/autonomousdatabase_update_mtls.yaml | 9 +- ...onomousdatabase_update_network_access.yaml | 57 +- .../adb/autonomousdatabase_wallet.yaml | 25 +- config/samples/kustomization.yaml | 18 + .../observability/v1/databaseobserver.yaml | 81 + ...databaseobserver_customization_fields.yaml | 54 + .../v1/databaseobserver_logs_promtail.yaml | 74 + .../v1alpha1/databaseobserver.yaml | 80 + .../databaseobserver_custom_config.yaml | 46 + .../databaseobserver_logs_promtail.yaml | 74 + .../v1alpha1/databaseobserver_minimal.yaml | 26 + .../v1alpha1/databaseobserver_vault.yaml | 30 + .../observability/v4/databaseobserver.yaml | 79 + .../v4/databaseobserver_custom_config.yaml | 46 + .../v4/databaseobserver_logs_promtail.yaml | 76 + .../v4/databaseobserver_minimal.yaml | 26 + .../v4/databaseobserver_vault.yaml | 39 + config/samples/sidb/dataguardbroker.yaml | 8 +- .../samples/sidb/oraclerestdataservice.yaml | 21 +- .../sidb/oraclerestdataservice_apex.yaml | 42 - .../sidb/oraclerestdataservice_create.yaml | 9 +- .../sidb/oraclerestdataservice_secrets.yaml | 18 - .../samples/sidb/singleinstancedatabase.yaml | 18 +- .../sidb/singleinstancedatabase_clone.yaml | 2 +- .../sidb/singleinstancedatabase_create.yaml | 4 +- .../sidb/singleinstancedatabase_express.yaml | 2 +- 
.../singleinstancedatabase_free-lite.yaml | 35 + ...singleinstancedatabase_free-truecache.yaml | 48 + .../sidb/singleinstancedatabase_free.yaml | 7 +- .../sidb/singleinstancedatabase_patch.yaml | 2 +- .../singleinstancedatabase_prebuiltdb.yaml | 2 +- .../sidb/singleinstancedatabase_standby.yaml | 4 +- .../sidb/singleinstancedatabase_tcps.yaml | 2 +- config/webhook/manifests.yaml | 412 +- .../autonomouscontainerdatabase_controller.go | 92 +- .../database/autonomousdatabase_controller.go | 1255 +- .../autonomousdatabasebackup_controller.go | 50 +- .../autonomousdatabaserestore_controller.go | 52 +- controllers/database/cdb_controller.go | 174 +- .../database/dataguardbroker_controller.go | 1199 -- controllers/database/dbcssystem_controller.go | 1345 +- controllers/database/lrest_controller.go | 1105 ++ controllers/database/lrpdb_controller.go | 2381 +++ .../oraclerestdataservice_controller.go | 593 +- controllers/database/ordssrvs_controller.go | 1116 ++ controllers/database/ordssrvs_ordsconfig.go | 258 + controllers/database/pdb_controller.go | 340 +- .../database/shardingdatabase_controller.go | 480 +- .../singleinstancedatabase_controller.go | 381 +- controllers/dataguard/datagauard_errors.go | 47 + controllers/dataguard/dataguard_utils.go | 1061 ++ .../dataguard/dataguardbroker_controller.go | 513 + .../databaseobserver_controller.go | 293 +- .../databaseobserver_resource.go | 132 +- docs/adb/ADB_LONG_TERM_BACKUP.md | 4 +- docs/adb/ADB_PREREQUISITES.md | 4 +- docs/adb/NETWORK_ACCESS_OPTIONS.md | 222 +- docs/adb/README.md | 173 +- docs/dbcs/README.md | 144 +- .../bind_to_existing_dbcs_system.md | 22 +- .../bind_to_existing_dbcs_system.yaml | 4 +- ..._to_existing_dbcs_system_sample_output.log | 141 +- docs/dbcs/provisioning/clone_dbcs_system.yaml | 20 + .../clone_dbcs_system_from_backup.yaml | 22 + ..._dbcs_system_from_backup_sample_output.log | 75 + .../clone_dbcs_system_from_database.yaml | 22 + ...bcs_system_from_database_sample_output.log | 39 + 
.../clone_dbcs_system_sample_output.log | 60 + .../provisioning/clone_from_backup_dbcs.md | 36 + docs/dbcs/provisioning/clone_from_database.md | 35 + .../provisioning/clone_from_existing_dbcs.md | 36 + .../dbcs/provisioning/create_dbcs_with_kms.md | 73 + .../dbcs/provisioning/create_dbcs_with_pdb.md | 55 + docs/dbcs/provisioning/create_kms.md | 50 + docs/dbcs/provisioning/create_pdb.md | 55 + .../create_pdb_to_existing_dbcs_system.md | 55 + ..._in_existing_dbcs_system_sample_output.log | 1 + ...reatepdb_in_existing_dbcs_system_list.yaml | 27 + ...xisting_dbcs_system_list_sample_output.log | 185 + .../dbcs_controller_parameters.md | 2 +- .../dbcs_service_migrate_to_kms.log | 132 + .../dbcs_service_migrate_to_kms.yaml | 16 + .../dbcs_service_with_2_node_rac.md | 42 +- .../dbcs_service_with_2_node_rac.yaml | 21 +- .../dbcs_service_with_all_parameters_asm.md | 41 +- .../dbcs_service_with_all_parameters_asm.yaml | 29 +- ..._with_all_parameters_asm_sample_output.log | 225 +- .../dbcs_service_with_all_parameters_lvm.md | 42 +- .../dbcs_service_with_all_parameters_lvm.yaml | 27 +- .../provisioning/dbcs_service_with_kms.yaml | 27 + .../dbcs_service_with_kms_sample_output.log | 91 + .../dbcs_service_with_minimal_parameters.md | 22 +- .../dbcs_service_with_minimal_parameters.yaml | 22 +- ..._with_minimal_parameters_sample_output.log | 175 +- .../provisioning/dbcs_service_with_pdb.yaml | 38 + .../dbcs_service_with_pdb_sample_output.log | 137 + docs/dbcs/provisioning/delete_pdb.md | 50 + ...eletepdb_in_existing_dbcs_system_list.yaml | 13 + ...xisting_dbcs_system_list_sample_output.log | 8 + docs/dbcs/provisioning/migrate_to_kms.md | 49 + .../scale_down_dbcs_system_shape.md | 30 +- .../scale_down_dbcs_system_shape.yaml | 17 +- ...e_down_dbcs_system_shape_sample_output.log | 371 +- .../scale_up_dbcs_system_shape.md | 32 +- .../scale_up_dbcs_system_shape.yaml | 17 +- docs/dbcs/provisioning/scale_up_storage.md | 30 +- docs/dbcs/provisioning/scale_up_storage.yaml | 19 +- 
.../scale_up_storage_sample_output.log | 440 +- .../provisioning/terminate_dbcs_system.md | 24 +- .../provisioning/terminate_dbcs_system.yaml | 2 +- .../terminate_dbcs_system_sample_output.log | 8 +- docs/dbcs/provisioning/update_license.md | 32 +- docs/dbcs/provisioning/update_license.yaml | 38 +- docs/multitenant/README.md | 259 +- docs/multitenant/lrest-based/README.md | 500 + .../lrest-based/images/Generalschema2.jpg | Bin 0 -> 96239 bytes .../lrest-based/images/UsecaseSchema.jpg | Bin 0 -> 185861 bytes .../multitenant/lrest-based/usecase/README.md | 139 + .../usecase/altersystem_pdb1_resource.yaml | 50 + .../usecase/cdbnamespace_binding.yaml | 13 + .../usecase/clone_pdb1_resource.yaml | 51 + .../usecase/clone_pdb2_resource.yaml | 51 + .../usecase/close_pdb1_resource.yaml | 47 + .../usecase/close_pdb2_resource.yaml | 47 + .../usecase/close_pdb3_resource.yaml | 47 + .../lrest-based/usecase/config-map-pdb.yaml | 11 + .../lrest-based/usecase/config_map_pdb.yaml | 11 + .../lrest-based/usecase/create_lrest_pod.yaml | 44 + .../usecase/create_pdb1_resource.yaml | 52 + .../usecase/create_pdb2_resource.yaml | 52 + .../usecase/delete_pdb1_resource.yaml | 45 + .../usecase/delete_pdb2_resource.yaml | 45 + docs/multitenant/lrest-based/usecase/makefile | 911 + .../usecase/map_pdb1_resource.yaml | 49 + .../usecase/map_pdb2_resource.yaml | 49 + .../usecase/map_pdb3_resource.yaml | 49 + .../usecase/open_pdb1_resource.yaml | 47 + .../usecase/open_pdb2_resource.yaml | 47 + .../usecase/open_pdb3_resource.yaml | 47 + .../lrest-based/usecase/parameters.txt | 52 + .../usecase/pdbnamespace_binding.yaml | 13 + .../usecase/plug_pdb1_resource.yaml | 54 + .../usecase/unplug_pdb1_resource.yaml | 46 + docs/multitenant/ords-based/NamespaceSeg.md | 14 + docs/multitenant/ords-based/README.md | 411 + .../images/K8S_NAMESPACE_SEG.png | Bin .../{ => ords-based}/images/K8S_SECURE1.png | Bin .../{ => ords-based}/images/K8S_SECURE2.png | Bin .../{ => ords-based}/images/K8S_SECURE3.png | Bin .../{ => 
ords-based}/images/K8S_SECURE4.png | Bin .../ords-based/images/makerunall.png | Bin 0 -> 211874 bytes .../ords-based/images/makesecrets_1_1.png | Bin 0 -> 117953 bytes .../{ => ords-based}/openssl_schema.jpg | Bin .../example_setup_using_oci_oke_cluster.md | 0 .../multinamespace/cdb_create.yaml | 30 +- .../multinamespace/pdb_clone.yaml | 22 +- .../multinamespace/pdb_close.yaml | 22 +- .../multinamespace/pdb_create.yaml | 22 +- .../multinamespace/pdb_delete.yaml | 15 +- .../provisioning/multinamespace/pdb_open.yaml | 22 +- .../provisioning/multinamespace/pdb_plug.yaml | 15 +- .../multinamespace/pdb_unplug.yaml | 14 +- .../ords-based/provisioning/ords_image.md | 81 + .../provisioning/quickOKEcreation.md | 0 .../singlenamespace}/cdb_create.yaml | 39 +- .../singlenamespace/cdb_secret.yaml | 0 .../singlenamespace/pdb_clone.yaml | 18 +- .../singlenamespace}/pdb_close.yaml | 22 +- .../singlenamespace}/pdb_create.yaml | 22 +- .../singlenamespace}/pdb_delete.yaml | 15 +- .../singlenamespace}/pdb_open.yaml | 22 +- .../singlenamespace}/pdb_plug.yaml | 13 +- .../singlenamespace/pdb_secret.yaml | 0 .../singlenamespace}/pdb_unplug.yaml | 12 +- docs/multitenant/ords-based/usecase/README.md | 112 + .../usecase/cdbnamespace_binding.yaml | 13 + .../usecase/clone_pdb1_resource.yaml | 50 + .../usecase/clone_pdb2_resource.yaml | 50 + .../usecase/close_pdb1_resource.yaml | 47 + .../usecase/close_pdb2_resource.yaml | 47 + .../usecase/close_pdb3_resource.yaml | 47 + .../ords-based/usecase/create_ords_pod.yaml | 48 + .../usecase/create_pdb1_resource.yaml | 51 + .../usecase/create_pdb2_resource.yaml | 51 + .../usecase/delete_pdb1_resource.yaml | 45 + .../usecase/delete_pdb2_resource.yaml | 45 + docs/multitenant/ords-based/usecase/makefile | 915 + .../ords-based/usecase/map_pdb1_resource.yaml | 49 + .../ords-based/usecase/map_pdb2_resource.yaml | 49 + .../ords-based/usecase/map_pdb3_resource.yaml | 49 + .../usecase/open_pdb1_resource.yaml | 47 + .../usecase/open_pdb2_resource.yaml | 47 + 
.../usecase/open_pdb3_resource.yaml | 47 + .../ords-based/usecase/parameters.txt | 61 + .../usecase/pdbnamespace_binding.yaml | 13 + .../usecase/plug_pdb1_resource.yaml | 53 + .../usecase/unplug_pdb1_resource.yaml | 46 + .../{ => ords-based}/usecase01/README.md | 199 +- .../{ => ords-based}/usecase01/ca.crt | 0 .../{ => ords-based}/usecase01/ca.key | 0 .../{ => ords-based}/usecase01/ca.srl | 0 .../usecase01}/cdb_create.yaml | 0 .../usecase01/cdb_secret.yaml | 0 .../usecase01/clone_pdb1_resource.yaml} | 32 +- .../usecase01/clone_pdb2_resource.yaml | 50 + .../usecase01/close_pdb1_resource.yaml | 47 + .../usecase01/close_pdb2_resource.yaml | 47 + .../usecase01/close_pdb3_resource.yaml | 47 + .../ords-based/usecase01/create_ords_pod.yaml | 48 + .../usecase01/create_pdb1_resource.yaml | 51 + .../usecase01/create_pdb2_resource.yaml | 51 + .../usecase01/delete_pdb1_resource.yaml | 45 + .../usecase01/delete_pdb2_resource.yaml | 45 + .../{ => ords-based}/usecase01/extfile.txt | 0 .../usecase01/logfiles/BuildImage.log | 896 + .../usecase01/logfiles/ImagePush.log | 0 .../usecase01/logfiles/cdb.log | 0 .../usecase01/logfiles/cdb_creation.log | 0 .../usecase01/logfiles/openssl_execution.log | 19 + .../usecase01/logfiles/ordsconfig.log | 39 + .../usecase01/logfiles/tagandpush.log | 0 .../usecase01/logfiles/testapi.log | 0 .../multitenant/ords-based/usecase01/makefile | 906 + .../usecase01/map_pdb1_resource.yaml | 49 + .../usecase01/map_pdb2_resource.yaml | 49 + .../usecase01/map_pdb3_resource.yaml | 49 + .../usecase01/open_pdb1_resource.yaml | 47 + .../usecase01/open_pdb2_resource.yaml | 47 + .../usecase01/open_pdb3_resource.yaml | 47 + ...acle-database-operator-system_binding.yaml | 13 + .../usecase01/oracle-database-operator.yaml | 0 .../ords-based/usecase01/parameters.txt | 61 + .../usecase01}/pdb_close.yaml | 0 .../usecase01}/pdb_create.yaml | 0 .../usecase01}/pdb_delete.yaml | 0 .../{ => ords-based}/usecase01/pdb_map.yaml | 0 .../usecase01}/pdb_open.yaml | 0 
.../usecase01/pdb_secret.yaml | 0 .../usecase01/plug_pdb1_resource.yaml} | 31 +- .../{ => ords-based}/usecase01/server.csr | 0 .../usecase01/tde_secret.yaml | 0 .../{ => ords-based}/usecase01/tls.crt | 0 .../{ => ords-based}/usecase01/tls.key | 0 .../usecase01/unplug_pdb1_resource.yaml} | 27 +- .../{ => ords-based}/usecase02/README.md | 144 +- .../ords-based/usecase02/pdb_clone.yaml | 50 + .../ords-based/usecase02/pdb_plug.yaml | 53 + .../usecase02/pdb_plugtde.yaml | 2 +- .../ords-based/usecase02/pdb_unplug.yaml | 46 + .../usecase02/pdb_unplugtde.yaml | 2 +- docs/multitenant/usecase02/tde_secret.yaml | 15 - docs/observability/README.md | 550 +- docs/ordsservices/README.md | 67 + docs/ordsservices/TROUBLESHOOTING.md | 129 + docs/ordsservices/api.md | 1388 ++ docs/ordsservices/autoupgrade.md | 57 + docs/ordsservices/examples/adb.md | 108 + docs/ordsservices/examples/adb_oraoper.md | 176 + docs/ordsservices/examples/mongo_api.md | 160 + docs/ordsservices/examples/multi_pool.md | 203 + docs/ordsservices/examples/sidb_container.md | 154 + .../usecase01/create_mong_schema.sql | 9 + docs/ordsservices/usecase01/help | 1 + docs/ordsservices/usecase01/makefile | 778 + .../usecase01/tnsadmin/tnsnames.ora | 3 + .../usecase01/tnsadmin/tnsnames.ora.offline | 1 + docs/sharding/README.md | 78 +- .../create_kubernetes_secret_for_db_user.md | 12 +- .../provisioning/database_connection.md | 6 +- docs/sharding/provisioning/debugging.md | 28 +- .../sharding_provisioning_with_db_events.md | 8 +- .../sharding_provisioning_with_db_events.yaml | 3 +- .../sharding_provisioning_with_free_images.md | 8 +- ...harding_provisioning_with_free_images.yaml | 7 +- ...y_cloning_db_from_gold_image_across_ads.md | 14 +- ...ing_by_cloning_db_gold_image_in_same_ad.md | 14 +- ...ding_provisioning_with_chunks_specified.md | 10 +- ..._provisioning_with_control_on_resources.md | 6 +- ...ith_notification_using_oci_notification.md | 12 +- ...ding_provisioning_without_db_gold_image.md | 8 +- 
...rding_scale_in_delete_an_existing_shard.md | 6 +- .../snr_ssharding_scale_out_add_shards.md | 10 +- .../snr_ssharding_shard_prov.yaml | 3 +- .../snr_ssharding_shard_prov_chunks.yaml | 3 +- .../snr_ssharding_shard_prov_clone.yaml | 3 +- ...ssharding_shard_prov_clone_across_ads.yaml | 3 +- .../snr_ssharding_shard_prov_delshard.yaml | 3 +- .../snr_ssharding_shard_prov_extshard.yaml | 3 +- .../snr_ssharding_shard_prov_memory_cpu.yaml | 3 +- ...sharding_shard_prov_send_notification.yaml | 3 +- ...y_cloning_db_from_gold_image_across_ads.md | 8 +- ...ing_by_cloning_db_gold_image_in_same_ad.md | 6 +- ...ding_provisioning_with_chunks_specified.md | 6 +- ..._provisioning_with_control_on_resources.md | 4 +- ...ith_notification_using_oci_notification.md | 8 +- ...ding_provisioning_without_db_gold_image.md | 4 +- ...rding_scale_in_delete_an_existing_shard.md | 8 +- .../ssharding_scale_out_add_shards.md | 10 +- .../system_sharding/ssharding_shard_prov.yaml | 3 +- .../ssharding_shard_prov_chunks.yaml | 59 + .../ssharding_shard_prov_clone.yaml | 3 +- ...ssharding_shard_prov_clone_across_ads.yaml | 3 +- .../ssharding_shard_prov_delshard.yaml | 3 +- .../ssharding_shard_prov_extshard.yaml | 3 +- .../ssharding_shard_prov_memory_cpu.yaml | 3 +- ...sharding_shard_prov_send_notification.yaml | 3 +- ...y_cloning_db_from_gold_image_across_ads.md | 14 +- ...ing_by_cloning_db_gold_image_in_same_ad.md | 12 +- ..._provisioning_with_control_on_resources.md | 8 +- ...ith_notification_using_oci_notification.md | 12 +- ...ding_provisioning_without_db_gold_image.md | 10 +- ...rding_scale_in_delete_an_existing_shard.md | 14 +- .../udsharding_scale_out_add_shards.md | 14 +- .../udsharding_shard_prov.yaml | 3 +- .../udsharding_shard_prov_clone.yaml | 3 +- ...dsharding_shard_prov_clone_across_ads.yaml | 3 +- .../udsharding_shard_prov_delshard.yaml | 3 +- .../udsharding_shard_prov_extshard.yaml | 3 +- .../udsharding_shard_prov_memory_cpu.yaml | 3 +- ...sharding_shard_prov_send_notification.yaml | 3 
+- docs/sidb/PREREQUISITES.md | 13 +- docs/sidb/README.md | 377 +- go.mod | 98 +- go.sum | 245 +- main.go | 123 +- oracle-database-operator.yaml | 13962 +++++++++++++--- ords/Dockerfile | 14 +- ords/ords_init.sh | 484 + ords/runOrdsSSL.sh | 19 +- test/e2e/autonomouscontainerdatabase_test.go | 8 +- ...autonomousdatabase_controller_bind_test.go | 44 +- ...tonomousdatabase_controller_create_test.go | 109 +- test/e2e/behavior/shared_behaviors.go | 133 +- test/e2e/suite_test.go | 6 +- 496 files changed, 74952 insertions(+), 11904 deletions(-) create mode 100644 apis/database/v1alpha1/adbfamily_common_spec.go create mode 100644 apis/database/v1alpha1/autonomousdatabase_conversion.go create mode 100644 apis/database/v1alpha1/dataguardbroker_conversion.go create mode 100644 apis/database/v1alpha1/dbcssystem_conversion.go create mode 100644 apis/database/v1alpha1/dbcssystem_kms_types.go create mode 100644 apis/database/v1alpha1/dbcssystem_pdbconfig_types.go create mode 100644 apis/database/v1alpha1/dbcssystem_webhook.go create mode 100644 apis/database/v1alpha1/oraclerestdataservice_conversion.go create mode 100644 apis/database/v1alpha1/shardingdatabase_conversion.go create mode 100644 apis/database/v1alpha1/singleinstancedatabase_conversion.go create mode 100644 apis/database/v4/adbfamily_common_spec.go rename apis/database/{v1alpha1/adbfamily_common_utils.go => v4/adbfamily_utils.go} (87%) create mode 100644 apis/database/v4/autonomouscontainerdatabase_types.go create mode 100644 apis/database/v4/autonomouscontainerdatabase_webhook.go create mode 100644 apis/database/v4/autonomousdatabase_types.go create mode 100644 apis/database/v4/autonomousdatabase_webhook.go create mode 100644 apis/database/v4/autonomousdatabasebackup_types.go create mode 100644 apis/database/v4/autonomousdatabasebackup_webhook.go create mode 100644 apis/database/v4/autonomousdatabaserestore_types.go create mode 100644 apis/database/v4/autonomousdatabaserestore_webhook.go rename 
apis/database/{v1alpha1 => v4}/cdb_types.go (93%) rename apis/database/{v1alpha1 => v4}/cdb_webhook.go (92%) create mode 100644 apis/database/v4/dataguardbroker_conversion.go create mode 100644 apis/database/v4/dataguardbroker_types.go create mode 100644 apis/database/v4/dataguardbroker_webhook.go create mode 100644 apis/database/v4/dbcssystem_conversion.go create mode 100644 apis/database/v4/dbcssystem_kms_types.go create mode 100644 apis/database/v4/dbcssystem_pdbconfig_types.go create mode 100644 apis/database/v4/dbcssystem_types.go create mode 100644 apis/database/v4/dbcssystem_webhook.go create mode 100644 apis/database/v4/groupversion_info.go create mode 100644 apis/database/v4/lrest_types.go create mode 100644 apis/database/v4/lrest_webhook.go create mode 100644 apis/database/v4/lrpdb_types.go create mode 100644 apis/database/v4/lrpdb_webhook.go create mode 100644 apis/database/v4/oraclerestdataservice_conversion.go create mode 100644 apis/database/v4/oraclerestdataservice_types.go create mode 100644 apis/database/v4/oraclerestdataservice_webhook.go create mode 100644 apis/database/v4/ordssrvs_types.go rename apis/database/{v1alpha1 => v4}/pdb_types.go (96%) rename apis/database/{v1alpha1 => v4}/pdb_webhook.go (85%) create mode 100644 apis/database/v4/shardingdatabase_conversion.go create mode 100644 apis/database/v4/shardingdatabase_types.go create mode 100644 apis/database/v4/shardingdatabase_webhook.go create mode 100644 apis/database/v4/singleinstancedatabase_conversion.go create mode 100644 apis/database/v4/singleinstancedatabase_types.go create mode 100644 apis/database/v4/singleinstancedatabase_webhook.go create mode 100644 apis/database/v4/zz_generated.deepcopy.go create mode 100644 apis/observability/v1/databaseobserver_types.go create mode 100644 apis/observability/v1/databaseobserver_webhook.go create mode 100644 apis/observability/v1/groupversion_info.go create mode 100644 apis/observability/v1/zz_generated.deepcopy.go create mode 100644 
apis/observability/v4/databaseobserver_types.go create mode 100644 apis/observability/v4/databaseobserver_webhook.go create mode 100644 apis/observability/v4/groupversion_info.go create mode 100644 apis/observability/v4/zz_generated.deepcopy.go create mode 100644 commons/database/podbuilder.go create mode 100644 commons/database/svcbuilder.go create mode 100644 commons/multitenant/lrest/common.go create mode 100644 config/crd/bases/database.oracle.com_lrests.yaml create mode 100644 config/crd/bases/database.oracle.com_lrpdbs.yaml create mode 100644 config/crd/bases/database.oracle.com_ordssrvs.yaml create mode 100644 config/crd/patches/cainjection_in_database_dataguardbrokers.yaml create mode 100644 config/crd/patches/cainjection_in_database_lrests.yaml create mode 100644 config/crd/patches/cainjection_in_database_lrpdbs.yaml create mode 100644 config/crd/patches/cainjection_in_database_oraclerestdataservices.yaml create mode 100644 config/crd/patches/cainjection_in_database_ordssrvs.yaml create mode 100644 config/crd/patches/cainjection_in_database_singleinstancedatabases.yaml create mode 100644 config/crd/patches/cainjection_in_observability_databaseobservers.yaml create mode 100644 config/crd/patches/webhook_in_lrests.yaml create mode 100644 config/crd/patches/webhook_in_lrpdbs.yaml create mode 100644 config/crd/patches/webhook_in_ordssrvs.yaml create mode 100644 config/rbac/lrest_editor_role.yaml create mode 100644 config/rbac/lrest_viewer_role.yaml create mode 100644 config/rbac/lrpdb_editor_role.yaml create mode 100644 config/rbac/lrpdb_viewer_role.yaml create mode 100644 config/rbac/ordssrvs_editor_role.yaml create mode 100644 config/rbac/ordssrvs_viewer_role.yaml create mode 100644 config/samples/adb/autonomousdatabase_clone.yaml create mode 100644 config/samples/observability/v1/databaseobserver.yaml create mode 100644 config/samples/observability/v1/databaseobserver_customization_fields.yaml create mode 100644 
config/samples/observability/v1/databaseobserver_logs_promtail.yaml create mode 100644 config/samples/observability/v1alpha1/databaseobserver.yaml create mode 100644 config/samples/observability/v1alpha1/databaseobserver_custom_config.yaml create mode 100644 config/samples/observability/v1alpha1/databaseobserver_logs_promtail.yaml create mode 100644 config/samples/observability/v1alpha1/databaseobserver_minimal.yaml create mode 100644 config/samples/observability/v1alpha1/databaseobserver_vault.yaml create mode 100644 config/samples/observability/v4/databaseobserver.yaml create mode 100644 config/samples/observability/v4/databaseobserver_custom_config.yaml create mode 100644 config/samples/observability/v4/databaseobserver_logs_promtail.yaml create mode 100644 config/samples/observability/v4/databaseobserver_minimal.yaml create mode 100644 config/samples/observability/v4/databaseobserver_vault.yaml delete mode 100644 config/samples/sidb/oraclerestdataservice_apex.yaml create mode 100644 config/samples/sidb/singleinstancedatabase_free-lite.yaml create mode 100644 config/samples/sidb/singleinstancedatabase_free-truecache.yaml delete mode 100644 controllers/database/dataguardbroker_controller.go create mode 100644 controllers/database/lrest_controller.go create mode 100644 controllers/database/lrpdb_controller.go create mode 100644 controllers/database/ordssrvs_controller.go create mode 100644 controllers/database/ordssrvs_ordsconfig.go create mode 100644 controllers/dataguard/datagauard_errors.go create mode 100644 controllers/dataguard/dataguard_utils.go create mode 100644 controllers/dataguard/dataguardbroker_controller.go create mode 100644 docs/dbcs/provisioning/clone_dbcs_system.yaml create mode 100644 docs/dbcs/provisioning/clone_dbcs_system_from_backup.yaml create mode 100644 docs/dbcs/provisioning/clone_dbcs_system_from_backup_sample_output.log create mode 100644 docs/dbcs/provisioning/clone_dbcs_system_from_database.yaml create mode 100644 
docs/dbcs/provisioning/clone_dbcs_system_from_database_sample_output.log create mode 100644 docs/dbcs/provisioning/clone_dbcs_system_sample_output.log create mode 100644 docs/dbcs/provisioning/clone_from_backup_dbcs.md create mode 100644 docs/dbcs/provisioning/clone_from_database.md create mode 100644 docs/dbcs/provisioning/clone_from_existing_dbcs.md create mode 100644 docs/dbcs/provisioning/create_dbcs_with_kms.md create mode 100644 docs/dbcs/provisioning/create_dbcs_with_pdb.md create mode 100644 docs/dbcs/provisioning/create_kms.md create mode 100644 docs/dbcs/provisioning/create_pdb.md create mode 100644 docs/dbcs/provisioning/create_pdb_to_existing_dbcs_system.md create mode 100644 docs/dbcs/provisioning/createkms_in_existing_dbcs_system_sample_output.log create mode 100644 docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list.yaml create mode 100644 docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list_sample_output.log create mode 100644 docs/dbcs/provisioning/dbcs_service_migrate_to_kms.log create mode 100644 docs/dbcs/provisioning/dbcs_service_migrate_to_kms.yaml create mode 100644 docs/dbcs/provisioning/dbcs_service_with_kms.yaml create mode 100644 docs/dbcs/provisioning/dbcs_service_with_kms_sample_output.log create mode 100644 docs/dbcs/provisioning/dbcs_service_with_pdb.yaml create mode 100644 docs/dbcs/provisioning/dbcs_service_with_pdb_sample_output.log create mode 100644 docs/dbcs/provisioning/delete_pdb.md create mode 100644 docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list.yaml create mode 100644 docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list_sample_output.log create mode 100644 docs/dbcs/provisioning/migrate_to_kms.md create mode 100644 docs/multitenant/lrest-based/README.md create mode 100644 docs/multitenant/lrest-based/images/Generalschema2.jpg create mode 100644 docs/multitenant/lrest-based/images/UsecaseSchema.jpg create mode 100644 docs/multitenant/lrest-based/usecase/README.md create mode 100644 
docs/multitenant/lrest-based/usecase/altersystem_pdb1_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/cdbnamespace_binding.yaml create mode 100644 docs/multitenant/lrest-based/usecase/clone_pdb1_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/clone_pdb2_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/close_pdb1_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/close_pdb2_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/close_pdb3_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/config-map-pdb.yaml create mode 100644 docs/multitenant/lrest-based/usecase/config_map_pdb.yaml create mode 100644 docs/multitenant/lrest-based/usecase/create_lrest_pod.yaml create mode 100644 docs/multitenant/lrest-based/usecase/create_pdb1_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/create_pdb2_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/delete_pdb1_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/delete_pdb2_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/makefile create mode 100644 docs/multitenant/lrest-based/usecase/map_pdb1_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/map_pdb2_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/map_pdb3_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/open_pdb1_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/open_pdb2_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/open_pdb3_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/parameters.txt create mode 100644 docs/multitenant/lrest-based/usecase/pdbnamespace_binding.yaml create mode 100644 docs/multitenant/lrest-based/usecase/plug_pdb1_resource.yaml create mode 100644 docs/multitenant/lrest-based/usecase/unplug_pdb1_resource.yaml create mode 100644 
docs/multitenant/ords-based/NamespaceSeg.md create mode 100644 docs/multitenant/ords-based/README.md rename docs/multitenant/{ => ords-based}/images/K8S_NAMESPACE_SEG.png (100%) rename docs/multitenant/{ => ords-based}/images/K8S_SECURE1.png (100%) rename docs/multitenant/{ => ords-based}/images/K8S_SECURE2.png (100%) rename docs/multitenant/{ => ords-based}/images/K8S_SECURE3.png (100%) rename docs/multitenant/{ => ords-based}/images/K8S_SECURE4.png (100%) create mode 100644 docs/multitenant/ords-based/images/makerunall.png create mode 100644 docs/multitenant/ords-based/images/makesecrets_1_1.png rename docs/multitenant/{ => ords-based}/openssl_schema.jpg (100%) rename docs/multitenant/{ => ords-based}/provisioning/example_setup_using_oci_oke_cluster.md (100%) rename docs/multitenant/{ => ords-based}/provisioning/multinamespace/cdb_create.yaml (58%) rename docs/multitenant/{ => ords-based}/provisioning/multinamespace/pdb_clone.yaml (74%) rename docs/multitenant/{ => ords-based}/provisioning/multinamespace/pdb_close.yaml (66%) rename docs/multitenant/{ => ords-based}/provisioning/multinamespace/pdb_create.yaml (67%) rename docs/multitenant/{ => ords-based}/provisioning/multinamespace/pdb_delete.yaml (70%) rename docs/multitenant/{ => ords-based}/provisioning/multinamespace/pdb_open.yaml (66%) rename docs/multitenant/{ => ords-based}/provisioning/multinamespace/pdb_plug.yaml (80%) rename docs/multitenant/{ => ords-based}/provisioning/multinamespace/pdb_unplug.yaml (77%) create mode 100644 docs/multitenant/ords-based/provisioning/ords_image.md rename docs/multitenant/{ => ords-based}/provisioning/quickOKEcreation.md (100%) rename docs/multitenant/{usecase01 => ords-based/provisioning/singlenamespace}/cdb_create.yaml (50%) rename docs/multitenant/{ => ords-based}/provisioning/singlenamespace/cdb_secret.yaml (100%) rename docs/multitenant/{ => ords-based}/provisioning/singlenamespace/pdb_clone.yaml (78%) rename docs/multitenant/{usecase01 => 
ords-based/provisioning/singlenamespace}/pdb_close.yaml (67%) rename docs/multitenant/{usecase01 => ords-based/provisioning/singlenamespace}/pdb_create.yaml (69%) rename docs/multitenant/{usecase01 => ords-based/provisioning/singlenamespace}/pdb_delete.yaml (72%) rename docs/multitenant/{usecase01 => ords-based/provisioning/singlenamespace}/pdb_open.yaml (67%) rename docs/multitenant/{usecase02 => ords-based/provisioning/singlenamespace}/pdb_plug.yaml (81%) rename docs/multitenant/{ => ords-based}/provisioning/singlenamespace/pdb_secret.yaml (100%) rename docs/multitenant/{usecase02 => ords-based/provisioning/singlenamespace}/pdb_unplug.yaml (78%) create mode 100644 docs/multitenant/ords-based/usecase/README.md create mode 100644 docs/multitenant/ords-based/usecase/cdbnamespace_binding.yaml create mode 100644 docs/multitenant/ords-based/usecase/clone_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/clone_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/close_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/close_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/close_pdb3_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/create_ords_pod.yaml create mode 100644 docs/multitenant/ords-based/usecase/create_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/create_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/delete_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/delete_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/makefile create mode 100644 docs/multitenant/ords-based/usecase/map_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/map_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/map_pdb3_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/open_pdb1_resource.yaml create mode 100644 
docs/multitenant/ords-based/usecase/open_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/open_pdb3_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/parameters.txt create mode 100644 docs/multitenant/ords-based/usecase/pdbnamespace_binding.yaml create mode 100644 docs/multitenant/ords-based/usecase/plug_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase/unplug_pdb1_resource.yaml rename docs/multitenant/{ => ords-based}/usecase01/README.md (85%) rename docs/multitenant/{ => ords-based}/usecase01/ca.crt (100%) rename docs/multitenant/{ => ords-based}/usecase01/ca.key (100%) rename docs/multitenant/{ => ords-based}/usecase01/ca.srl (100%) rename docs/multitenant/{provisioning/singlenamespace => ords-based/usecase01}/cdb_create.yaml (100%) rename docs/multitenant/{ => ords-based}/usecase01/cdb_secret.yaml (100%) rename docs/multitenant/{usecase02/pdb_clone.yaml => ords-based/usecase01/clone_pdb1_resource.yaml} (57%) create mode 100644 docs/multitenant/ords-based/usecase01/clone_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/close_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/close_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/close_pdb3_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/create_ords_pod.yaml create mode 100644 docs/multitenant/ords-based/usecase01/create_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/create_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/delete_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/delete_pdb2_resource.yaml rename docs/multitenant/{ => ords-based}/usecase01/extfile.txt (100%) create mode 100644 docs/multitenant/ords-based/usecase01/logfiles/BuildImage.log rename docs/multitenant/{ => ords-based}/usecase01/logfiles/ImagePush.log (100%) rename docs/multitenant/{ => 
ords-based}/usecase01/logfiles/cdb.log (100%) rename docs/multitenant/{ => ords-based}/usecase01/logfiles/cdb_creation.log (100%) create mode 100644 docs/multitenant/ords-based/usecase01/logfiles/openssl_execution.log create mode 100644 docs/multitenant/ords-based/usecase01/logfiles/ordsconfig.log rename docs/multitenant/{ => ords-based}/usecase01/logfiles/tagandpush.log (100%) rename docs/multitenant/{ => ords-based}/usecase01/logfiles/testapi.log (100%) create mode 100644 docs/multitenant/ords-based/usecase01/makefile create mode 100644 docs/multitenant/ords-based/usecase01/map_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/map_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/map_pdb3_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/open_pdb1_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/open_pdb2_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/open_pdb3_resource.yaml create mode 100644 docs/multitenant/ords-based/usecase01/oracle-database-operator-system_binding.yaml rename docs/multitenant/{ => ords-based}/usecase01/oracle-database-operator.yaml (100%) create mode 100644 docs/multitenant/ords-based/usecase01/parameters.txt rename docs/multitenant/{provisioning/singlenamespace => ords-based/usecase01}/pdb_close.yaml (100%) rename docs/multitenant/{provisioning/singlenamespace => ords-based/usecase01}/pdb_create.yaml (100%) rename docs/multitenant/{provisioning/singlenamespace => ords-based/usecase01}/pdb_delete.yaml (100%) rename docs/multitenant/{ => ords-based}/usecase01/pdb_map.yaml (100%) rename docs/multitenant/{provisioning/singlenamespace => ords-based/usecase01}/pdb_open.yaml (100%) rename docs/multitenant/{ => ords-based}/usecase01/pdb_secret.yaml (100%) rename docs/multitenant/{provisioning/singlenamespace/pdb_plug.yaml => ords-based/usecase01/plug_pdb1_resource.yaml} (63%) rename docs/multitenant/{ => 
ords-based}/usecase01/server.csr (100%) rename docs/multitenant/{ => ords-based}/usecase01/tde_secret.yaml (100%) rename docs/multitenant/{ => ords-based}/usecase01/tls.crt (100%) rename docs/multitenant/{ => ords-based}/usecase01/tls.key (100%) rename docs/multitenant/{provisioning/singlenamespace/pdb_unplug.yaml => ords-based/usecase01/unplug_pdb1_resource.yaml} (59%) rename docs/multitenant/{ => ords-based}/usecase02/README.md (69%) create mode 100644 docs/multitenant/ords-based/usecase02/pdb_clone.yaml create mode 100644 docs/multitenant/ords-based/usecase02/pdb_plug.yaml rename docs/multitenant/{ => ords-based}/usecase02/pdb_plugtde.yaml (96%) create mode 100644 docs/multitenant/ords-based/usecase02/pdb_unplug.yaml rename docs/multitenant/{ => ords-based}/usecase02/pdb_unplugtde.yaml (96%) delete mode 100644 docs/multitenant/usecase02/tde_secret.yaml create mode 100644 docs/ordsservices/README.md create mode 100644 docs/ordsservices/TROUBLESHOOTING.md create mode 100644 docs/ordsservices/api.md create mode 100644 docs/ordsservices/autoupgrade.md create mode 100644 docs/ordsservices/examples/adb.md create mode 100644 docs/ordsservices/examples/adb_oraoper.md create mode 100644 docs/ordsservices/examples/mongo_api.md create mode 100644 docs/ordsservices/examples/multi_pool.md create mode 100644 docs/ordsservices/examples/sidb_container.md create mode 100644 docs/ordsservices/usecase01/create_mong_schema.sql create mode 100644 docs/ordsservices/usecase01/help create mode 100644 docs/ordsservices/usecase01/makefile create mode 100644 docs/ordsservices/usecase01/tnsadmin/tnsnames.ora create mode 100644 docs/ordsservices/usecase01/tnsadmin/tnsnames.ora.offline create mode 100644 docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml create mode 100644 ords/ords_init.sh diff --git a/.gitignore b/.gitignore index 98fbc1c4..51923538 100644 --- a/.gitignore +++ b/.gitignore @@ -7,7 +7,7 @@ ords/*zip .gitattributes .vscode .gitlab-ci.yml - +.DS_Store 
# development .idea .local diff --git a/Dockerfile b/Dockerfile index 11a56962..f444d508 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,13 +6,16 @@ ARG BUILDER_IMG FROM ${BUILDER_IMG} as builder -# Download golang if BUILD_INTERNAL is set to true +ARG TARGETARCH +# Download golang if INSTALL_GO is set to true ARG INSTALL_GO ARG GOLANG_VERSION RUN if [ "$INSTALL_GO" = "true" ]; then \ - curl -LJO https://go.dev/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz &&\ - rm -rf /usr/local/go && tar -C /usr/local -xzf go${GOLANG_VERSION}.linux-amd64.tar.gz &&\ - rm go${GOLANG_VERSION}.linux-amd64.tar.gz; \ + echo -e "\nCurrent Arch: $(arch), Downloading Go for linux/${TARGETARCH}" &&\ + curl -LJO https://go.dev/dl/go${GOLANG_VERSION}.linux-${TARGETARCH}.tar.gz &&\ + rm -rf /usr/local/go && tar -C /usr/local -xzf go${GOLANG_VERSION}.linux-${TARGETARCH}.tar.gz &&\ + rm go${GOLANG_VERSION}.linux-${TARGETARCH}.tar.gz; \ + echo "Go Arch: $(/usr/local/go/bin/go env GOARCH)"; \ fi ENV PATH=${GOLANG_VERSION:+"${PATH}:/usr/local/go/bin"} @@ -33,16 +36,17 @@ COPY LICENSE.txt LICENSE.txt COPY THIRD_PARTY_LICENSES_DOCKER.txt THIRD_PARTY_LICENSES_DOCKER.txt # Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go +RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -a -o manager main.go -# Use oraclelinux:8-slim as base image to package the manager binary -FROM oraclelinux:8-slim +# Use oraclelinux:9 as base image to package the manager binary +FROM oraclelinux:9 ARG CI_COMMIT_SHA ARG CI_COMMIT_BRANCH ENV COMMIT_SHA=${CI_COMMIT_SHA} \ COMMIT_BRANCH=${CI_COMMIT_BRANCH} WORKDIR / COPY --from=builder /workspace/manager . +COPY ords/ords_init.sh . RUN useradd -u 1002 nonroot USER nonroot diff --git a/Makefile b/Makefile index 88e14843..b9755e6f 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 2022, Oracle and/or its affiliates. +# Copyright (c) 2025, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # @@ -18,14 +18,14 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) # Image URL to use all building/pushing image targets IMG ?= controller:latest -# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -# API version has to be v1 to use defaulting (https://github.com/kubernetes-sigs/controller-tools/issues/478) -CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" +# Enable allowDangerousTypes to use float type in CRD +# Remove the Desc to avoid YAML getting too long. See the discussion: +# https://github.com/kubernetes-sigs/kubebuilder/issues/1140 +CRD_OPTIONS ?= "crd:maxDescLen=0,allowDangerousTypes=true" # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.21 +ENVTEST_K8S_VERSION = 1.29.0 # Operator YAML file OPERATOR_YAML=$$(basename $$(pwd)).yaml - # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) GOBIN=$(shell go env GOPATH)/bin @@ -40,69 +40,82 @@ SHELL = /usr/bin/env bash -o pipefail .SHELLFLAGS = -ec all: build - ##@ Development manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases - + generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." - + fmt: ## Run go fmt against code. go fmt ./... - + vet: ## Run go vet against code. go vet ./... - + TEST ?= ./apis/database/v1alpha1 ./commons/... ./controllers/... test: manifests generate fmt vet envtest ## Run unit tests. 
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test $(TEST) -coverprofile cover.out - + E2ETEST ?= ./test/e2e/ e2e: manifests generate fmt vet envtest ## Run e2e tests. KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test $(E2ETEST) -test.timeout 0 -test.v --ginkgo.fail-fast - + ##@ Build - + build: generate fmt vet ## Build manager binary. go build -o bin/manager main.go - + run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go - -GOLANG_VERSION ?= 1.21.7 + +GOLANG_VERSION ?= 1.23.3 ## Download golang in the Dockerfile if BUILD_INTERNAL is set to true. ## Otherwise, use golang image from docker hub as the builder. ifeq ($(BUILD_INTERNAL), true) -BUILDER_IMG = oraclelinux:8 +BUILDER_IMG = oraclelinux:9 BUILD_ARGS = --build-arg BUILDER_IMG=$(BUILDER_IMG) --build-arg GOLANG_VERSION=$(GOLANG_VERSION) --build-arg INSTALL_GO=true else BUILDER_IMG = golang:$(GOLANG_VERSION) -BUILD_ARGS = --build-arg BUILDER_IMG=$(BUILDER_IMG) --build-arg INSTALL_GO=false +BUILD_ARGS = --build-arg BUILDER_IMG=$(BUILDER_IMG) --build-arg INSTALL_GO="false" --build-arg GOLANG_VERSION=$(GOLANG_VERSION) +endif +ifeq ($(BUILD_MANIFEST), true) +BUILD_ARGS := $(BUILD_ARGS) --platform=linux/arm64,linux/amd64 --jobs=2 --manifest +PUSH_ARGS := manifest +else +BUILD_ARGS := $(BUILD_ARGS) --platform=linux/amd64 --tag endif docker-build: #manifests generate fmt vet #test ## Build docker image with the manager. Disable the test but keep the validations to fail fast docker build --no-cache=true --build-arg http_proxy=$(HTTP_PROXY) --build-arg https_proxy=$(HTTPS_PROXY) \ - --build-arg CI_COMMIT_SHA=$(CI_COMMIT_SHA) --build-arg CI_COMMIT_BRANCH=$(CI_COMMIT_BRANCH) \ - $(BUILD_ARGS) . -t $(IMG) - + --build-arg CI_COMMIT_SHA=$(CI_COMMIT_SHA) --build-arg CI_COMMIT_BRANCH=$(CI_COMMIT_BRANCH) \ + $(BUILD_ARGS) $(IMG) . + docker-push: ## Push docker image with the manager. 
- docker push $(IMG) + docker $(PUSH_ARGS) push $(IMG) -##@ Deployment +# Push to minikube's local registry enabled by registry add-on +minikube-push: + docker tag $(IMG) $$(minikube ip):5000/$(IMG) + docker push --tls-verify=false $$(minikube ip):5000/$(IMG) +##@ Deployment + install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl apply -f - - + uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl delete -f - - + deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) $(KUSTOMIZE) build config/default | kubectl apply -f - +minikube-deploy: minikube-operator-yaml minikube-push + kubectl apply -f $(OPERATOR_YAML) + # Bug:34265574 -# Used sed to reposition the controller-manager Deployment after the certificate creation in the OPERATOR_YAML +# Used sed to reposition the controller-manager Deployment after the certificate creation in the OPERATOR_YAML operator-yaml: manifests kustomize cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) $(KUSTOMIZE) build config/default > "$(OPERATOR_YAML)" @@ -110,57 +123,62 @@ operator-yaml: manifests kustomize (echo --- && sed '/^apiVersion: apps\/v1/,/---/!d' "$(OPERATOR_YAML).bak") >> "$(OPERATOR_YAML)" rm "$(OPERATOR_YAML).bak" +minikube-operator-yaml: IMG:=localhost:5000/$(IMG) +minikube-operator-yaml: operator-yaml + sed -i.bak 's/\(replicas.\) 3/\1 1/g' "$(OPERATOR_YAML)" + rm "$(OPERATOR_YAML).bak" + undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. 
$(KUSTOMIZE) build config/default | kubectl delete -f - - + ##@ Build Dependencies - + ## Location to install dependencies to LOCALBIN ?= $(shell pwd)/bin $(LOCALBIN): mkdir -p $(LOCALBIN) - + ## Tool Binaries KUSTOMIZE ?= $(LOCALBIN)/kustomize CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen ENVTEST ?= $(LOCALBIN)/setup-envtest - + ## Tool Versions -KUSTOMIZE_VERSION ?= v3.8.7 -CONTROLLER_TOOLS_VERSION ?= v0.6.1 - +KUSTOMIZE_VERSION ?= v5.3.0 +CONTROLLER_TOOLS_VERSION ?= v0.16.5 + KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. $(KUSTOMIZE): $(LOCALBIN) curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN) - + .PHONY: controller-gen controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. $(CONTROLLER_GEN): $(LOCALBIN) GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) - + .PHONY: envtest envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. $(ENVTEST): $(LOCALBIN) GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest - - + + .PHONY: bundle bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files. operator-sdk generate kustomize manifests -q cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) operator-sdk bundle validate ./bundle - + .PHONY: bundle-build bundle-build: ## Build the bundle image. docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . - + .PHONY: bundle-push bundle-push: ## Push the bundle image. $(MAKE) docker-push IMG=$(BUNDLE_IMG) - + .PHONY: opm OPM = ./bin/opm opm: ## Download opm locally if necessary. 
@@ -172,33 +190,32 @@ ifeq (,$(shell which opm 2>/dev/null)) OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\ chmod +x $(OPM) ;\ - } + } else OPM = $(shell which opm) endif endif - + # A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). # These images MUST exist in a registry and be pull-able. BUNDLE_IMGS ?= $(BUNDLE_IMG) - + # The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) - + # Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. ifneq ($(origin CATALOG_BASE_IMG), undefined) FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) endif - + # Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. # This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: # https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator .PHONY: catalog-build catalog-build: opm ## Build a catalog image. $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) - + # Push the catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. 
$(MAKE) docker-push IMG=$(CATALOG_IMG) - diff --git a/PREREQUISITES.md b/PREREQUISITES.md index bc333357..3c73ad4b 100644 --- a/PREREQUISITES.md +++ b/PREREQUISITES.md @@ -2,7 +2,7 @@ ## Prerequisites for Using Oracle Database Operator for Kubernetes -Oracle Database Operator for Kubernetes (OraOperator) manages all Cloud deployments of Oracle Database, including: +Oracle Database Operator for Kubernetes (`OraOperator`) manages all Cloud deployments of Oracle Database, including: * Oracle Autonomous Database (ADB) * Containerized Oracle Database Single Instance (SIDB) @@ -19,15 +19,15 @@ To set up a Kubernetes cluster on Oracle Cloud Infrastructure: 1. Create an OKE Cluster 1. Provision persistent storage for data files (NFS or Block) -Note: You must provision persistent storage if you intend to deploy containerized databases over the OKE cluster. +Note: If you intend to deploy containerized databases over the OKE cluster, then you must provision persistent storage. ### Prerequites for Oracle Autonomous Database (ADB) -If you intent to use `OraOperator` to handle Oracle Autonomous Database lifecycles, then read [Oracle Autonomous Database prerequisites](./docs/adb/ADB_PREREQUISITES.md) +If you intend to use `OraOperator` to handle Oracle Autonomous Database lifecycles, then read [Oracle Autonomous Database prerequisites](./docs/adb/ADB_PREREQUISITES.md) ### Prerequites for Single Instance Databases (SIDB) -If you intent to use `OraOperator` to handle Oracle Database Single Instance lifecycles, then read [Single Instance Database Prerequisites](./docs/sidb/PREREQUISITES.md) +If you intend to use `OraOperator` to handle Oracle Database Single Instance lifecycles, then read [Single Instance Database Prerequisites](./docs/sidb/PREREQUISITES.md) ### Prerequites for Oracle Globally Distributed Databases(GDD) diff --git a/PROJECT b/PROJECT index fbf861db..97e9409c 100644 --- a/PROJECT +++ b/PROJECT @@ -131,6 +131,10 @@ resources: kind: DbcsSystem path: 
github.com/oracle/oracle-database-operator/apis/database/v1alpha1 version: v1alpha1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1beta1 - api: crdVersion: v1beta1 namespaced: true @@ -157,4 +161,102 @@ resources: defaulting: true validation: true webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: ShardingDatabase + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: DbcsSystem + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: LREST + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: LRPDB + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: OrdsSrvs + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: observability + kind: DatabaseObserver + path: github.com/oracle/oracle-database-operator/apis/observability/v1 + version: v1 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: observability + kind: DatabaseObserver + path: github.com/oracle/oracle-database-operator/apis/observability/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: SingleInstanceDatabase + path: 
github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: DataguardBroker + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 +- api: + crdVersion: v1beta1 + namespaced: true + domain: oracle.com + group: database + kind: OracleRestDataService + path: github.com/oracle/oracle-database-operator/apis/database/v4 + version: v4 + webhooks: + conversion: true + webhookVersion: v1beta1 version: "3" diff --git a/README.md b/README.md index 3409463b..7afa79e8 100644 --- a/README.md +++ b/README.md @@ -2,53 +2,68 @@ ## Make Oracle Database Kubernetes Native -As part of Oracle's resolution to make Oracle Database Kubernetes native (that is, observable and operable by Kubernetes), Oracle released _Oracle Database Operator for Kubernetes_ (`OraOperator` or the operator). OraOperator extends the Kubernetes API with custom resources and controllers for automating Oracle Database lifecycle management. +As part of Oracle's resolution to make Oracle Database Kubernetes native (that is, observable and operable by Kubernetes), Oracle released the _Oracle Database Operator for Kubernetes_ (`OraOperator` or the operator). OraOperator extends the Kubernetes API with custom resources and controllers for automating the management of the Oracle Database lifecycle. 
-In this v1.1.0 production release, `OraOperator` supports the following database configurations and infrastructure: +## Supported Database Configurations in V1.2.0 +In this v1.2.0 production release, `OraOperator` supports the following database configurations, and controllers: * Oracle Autonomous Database: * Oracle Autonomous Database shared Oracle Cloud Infrastructure (OCI) (ADB-S) * Oracle Autonomous Database on dedicated Cloud infrastructure (ADB-D) - * Oracle Autonomous Container Database (ACD) (infrastructure) is the infrastructure for provisioning Autonomous Databases. + * Oracle Autonomous Container Database (ACD), the infrastructure for provisioning Autonomous Databases. * Containerized Single Instance databases (SIDB) deployed in the Oracle Kubernetes Engine (OKE) and any k8s where OraOperator is deployed * Containerized Oracle Globally Distributed Databases(GDD) deployed in OKE and any k8s where OraOperator is deployed * Oracle Multitenant Databases (CDB/PDBs) -* Oracle Base Database Cloud Service (BDBCS) -* Oracle Data Guard (Preview status) -* Oracle Database Observability (Preview status) - -Oracle will continue to extend `OraOperator` to support additional Oracle Database configurations. 
- -## New in V1.1.0 Release -* Namespace scope deployment option -* Enhanced security with namespace scope deployment option -* Support for Oracle Database 23ai Free (with SIDB) -* Automatic Storage Expansion for SIDB and Oracle Globally Distributed Database -* User-Defined Sharding -* TCPS support customer provided certs -* Execute custom scripts during DB setup/startup -* Patching for SIDB Primary/Standby in Data Guard -* Long-term backup for Autonomous Databases (ADB): Support for [long-term retention backup](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/backup-long-term.html) and removed support for the deprecated mandatory backup -* Wallet expiry date for ADB: A user-friendly enhancement to display the wallet expiry date in the status of the associated ADB -* Wait-for-Completion option for ADB: Supports `kubectl wait` command that allows the user to wait for a specific condition on ADB -* OKE workload Identify: Supports OKE workload identity authentication method (i.e., uses OKE credentials). 
For more details, refer to [Oracle Autonomous Database (ADB) Prerequisites](docs/adb/ADB_PREREQUISITES.md#authorized-with-oke-workload-identity) -* Database Observability (Preview - Metrics) - -## Features Summary +* Oracle Base Database Service (OBDS) on Oracle Cloud Infrastructure (OCI) +* Oracle Data Guard +* Oracle Database Observability +* Oracle Database Rest Service (ORDS) instances + +## New Lifecycle Features in V1.2.0 Release (Controllers Enhancements) +* ORDSSERVICES + - Install on SIDB and ADB + - Provision and Delete ORDS instances +* SIDB + - Oracle Database 23ai Free support + - Oracle Database 23ai Free-lite support + - SIDB resource management + - True Cache support for Free SIDB databases (Preview) + - Observer for FastStartFailover with Data Guard + - Snapshot Standby support in Data Guard setup +* Globally Distributed Database : Support for Oracle Database 23ai Raft replication +* Autonomous Database: support for Database cloning +* Multitenant DB: + - ORDS-based Controller: assertive deletion policy. + - New LRES based Controller (ARM & AM) + - PDBs settings with init parameters config map + - Assertive deletion policy. +* Database Observability (preview) + - Support for Database Logs (in addition to Metrics) + - Support for the latest Exporter container images + - Bug Fix: Prometheus label config +* Oracle Base Database Service: support for Oracle Database 23ai Cloning, using KMS Vaults, PDB creation. 
+ +## New Product Features +*The Operator itself, as a product, brings the following new features: +* Published on `operatorhub.io` +* Operator Lifecycle Manager (OLM) support (install from `operatorhub.io`) +* Validated on Google Kubernetes Engine + +## Overall Features Summary This release of Oracle Database Operator for Kubernetes (the operator) supports the following lifecycle operations: -* ADB-S/ADB-D: Provision, bind, start, stop, terminate (soft/hard), scale (up/down), long-term backup, manual restore -* ACD: provision, bind, restart, terminate (soft/hard) -* SIDB: Provision, clone, patch (in-place/out-of-place), update database initialization parameters, update database configuration (Flashback, archiving), Oracle Enterprise Manager (EM) Express (a basic observability console), Oracle REST Data Service (ORDS) to support REST based SQL, PDB management, SQL Developer Web, and Application Express (Apex) -* GDD: Provision/deploy Oracle Globally Distributed Databases and the GDD topology, Add a new shard, Delete an existing shard -* Oracle Multitenant Database: Bind to a CDB, Create a  PDB, Plug a  PDB, Unplug a PDB, Delete a PDB, Clone a PDB, Open/Close a PDB -* Oracle Base Database Cloud Service (BDBCS): provision, bind, scale shape Up/Down, Scale Storage Up, Terminate and Update License +* ADB-S/ADB-D: Provision, bind, start, stop, terminate (soft/hard), scale (up/down), long-term backup, manual restore, cloning. 
+* ACD: Provision, bind, restart, terminate (soft/hard) +* SIDB: Provision, clone, patch (in-place/out-of-place), update database initialization parameters, update database configuration (Flashback, archiving), Oracle Enterprise Manager (EM) Express (basic console), Oracle REST Data Service (ORDS) to support REST based SQL, PDB management, SQL Developer Web, Application Express (Apex), Resource management, True Cache, Observer for FastStartFailover (Data Guard), and Snapshot Standby (Data Guard) +* ORDS Services: Provision and delete ORDS instances +* Globally Distrib. (Sharded): Provision/deploy sharded databases and the shard topology, Add a new shard, Delete an existing shard, Raft replication. +* Oracle Multitenant Database (choice of controller): Bind to a CDB, Create a  PDB, Plug a  PDB, Unplug a PDB, Delete a PDB, Clone a PDB, Open/Close a PDB, Assertive deletion policy +* Oracle Base Database Service (OBDS): Provision, bind, scale shape Up/Down, Scale Storage Up, Terminate and Update License, Cloning, PDB creation, using KMS Vaults on Oracle Cloud Infrastructure (OCI) * Oracle Data Guard: Provision a Standby for the SIDB resource, Create a Data Guard Configuration, Perform a Switchover, Patch Primary and Standby databases in Data Guard Configuration -* Oracle Database Observability: create, patch, delete databaseObserver resources -* Watch over a set of namespaces or all the namespaces in the cluster using the "WATCH_NAMESPACE" env variable of the operator deployment +* Oracle Database Observability: create, patch, delete `databaseObserver` resources (Logs and Metrics) +* Watch over a set of namespaces or all the namespaces in the cluster using the `WATCH_NAMESPACE` environment variable of the operator deployment -The upcoming releases will support new configurations, operations, and capabilities. 
## Release Status @@ -56,11 +71,11 @@ This production release has been installed and tested on the following Kubernete * [Oracle Container Engine for Kubernetes (OKE)](https://www.oracle.com/cloud-native/container-engine-kubernetes/) with Kubernetes 1.24 * [Oracle Linux Cloud Native Environment(OLCNE)](https://docs.oracle.com/en/operating-systems/olcne/) 1.6 -* [Minikube](https://minikube.sigs.k8s.io/docs/) with version v1.29.0 -* [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/) +* [Azure Kubernetes Service](https://azure.microsoft.com/en-us/services/kubernetes-service/) * [Amazon Elastic Kubernetes Service](https://aws.amazon.com/eks/) +* [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/docs) * [Red Hat OKD](https://www.okd.io/) -* [Red Hat OpenShift](https://www.redhat.com/en/technologies/cloud-computing/openshift/) +* [Minikube](https://minikube.sigs.k8s.io/docs/) with version v1.29.0 ## Prerequisites @@ -68,12 +83,12 @@ Oracle strongly recommends that you ensure your system meets the following [Prer * ### Install cert-manager - The operator uses webhooks for validating user input before persisting it in etcd. Webhooks require TLS certificates that are generated and managed by a certificate manager. + The operator uses webhooks for validating user input before persisting it in `etcd`. Webhooks require TLS certificates that are generated and managed by a certificate manager. 
Install the certificate manager with the following command: ```sh - kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml ``` * ### Create Role Bindings for Access Management @@ -83,8 +98,8 @@ Oracle strongly recommends that you ensure your system meets the following [Prer This is the default mode, in which OraOperator is deployed to operate in a cluster, and to monitor all the namespaces in the cluster. - - Grant the `serviceaccount:oracle-database-operator-system:default` cluster wide access for the resources by applying [cluster-role-binding.yaml](./rbac/cluster-role-binding.yaml) - + - Grant the `serviceaccount:oracle-database-operator-system:default` clusterwide access for the resources by applying [cluster-role-binding.yaml](./rbac/cluster-role-binding.yaml) + ```sh kubectl apply -f rbac/cluster-role-binding.yaml ``` @@ -97,31 +112,30 @@ Oracle strongly recommends that you ensure your system meets the following [Prer ##### 2. Namespace Scoped Deployment - In this mode, OraOperator can be deployed to operate in a namespace, and to monitor one or many namespaces. + In this mode, `OraOperator` can be deployed to operate in a namespace, and to monitor one or many namespaces. - - Grant `serviceaccount:oracle-database-operator-system:default` service account with resource access in the required namespaces. For example, to monitor only the default namespace, apply the [default-ns-role-binding.yaml](./rbac/default-ns-role-binding.yaml) + - Grant `serviceaccount:oracle-database-operator-system:default` service account with resource access in the required namespaces. 
For example, to monitor only the default namespace, apply the [`default-ns-role-binding.yaml`](./rbac/default-ns-role-binding.yaml) ```sh kubectl apply -f rbac/default-ns-role-binding.yaml ``` To watch additional namespaces, create different role binding files for each namespace, using [default-ns-role-binding.yaml](./rbac/default-ns-role-binding.yaml) as a template, and changing the `metadata.name` and `metadata.namespace` fields - - Next, edit the [oracle-database-operator.yaml](./oracle-database-operator.yaml) to add the required namespaces under `WATCH_NAMESPACE`. Use comma-delimited values for multiple namespaces. + - Next, edit the [`oracle-database-operator.yaml`](./oracle-database-operator.yaml) to add the required namespaces under `WATCH_NAMESPACE`. Use comma-delimited values for multiple namespaces. ```sh - name: WATCH_NAMESPACE value: "default" ``` - - Finally, apply the edited [oracle-database-operator.yaml](./oracle-database-operator.yaml) to deploy the Operator + - Finally, apply the edited [`oracle-database-operator.yaml`](./oracle-database-operator.yaml) to deploy the Operator ```sh kubectl apply -f oracle-database-operator.yaml ``` - * ### ClusterRole and ClusterRoleBinding for NodePort services - To expose services on each node's IP and port (the NodePort) apply the [node-rbac.yaml](./rbac/node-rbac.yaml). Note that this step is not required for LoadBalancer services. + To expose services on each node's IP and port (the NodePort), apply the [`node-rbac.yaml`](./rbac/node-rbac.yaml). Note that this step is not required for LoadBalancer services. ```sh kubectl apply -f rbac/node-rbac.yaml @@ -137,11 +151,21 @@ Oracle strongly recommends that you ensure your system meets the following [Prer kubectl apply -f oracle-database-operator.yaml ``` - Ensure that the operator pods are up and running. For high availability, Operator pod replicas are set to a default of 3. You can scale this setting up or down. 
+## Install Oracle DB Operator + + After you have completed the preceding prerequisite changes, you can install the operator. To install the operator in the cluster quickly, you can apply the modified `oracle-database-operator.yaml` file from the preceding step. + + Run the following command + + ```sh + kubectl apply -f oracle-database-operator.yaml + ``` + + Ensure that the operator pods are up and running. For high availability, operator pod replicas are set to a default of 3. You can scale this setting up or down. ```sh $ kubectl get pods -n oracle-database-operator-system - + NAME READY STATUS RESTARTS AGE pod/oracle-database-operator-controller-manager-78666fdddb-s4xcm 1/1 Running 0 11d pod/oracle-database-operator-controller-manager-78666fdddb-5k6n4 1/1 Running 0 11d @@ -164,7 +188,11 @@ The following quickstarts are designed for specific database configurations: * [Containerized Oracle Single Instance Database and Data Guard](./docs/sidb/README.md) * [Containerized Oracle Globally Distributed Database](./docs/sharding/README.md) * [Oracle Multitenant Database](./docs/multitenant/README.md) -* [Oracle Base Database Cloud Service (BDBCS)](./docs/dbcs/README.md) +* [Oracle Base Database Service (OBDS)](./docs/dbcs/README.md) + + +The following quickstart is designed for non-database configurations: +* [Oracle Database Observability](./docs/observability/README.md) The following quickstart is designed for non-database configurations: @@ -175,7 +203,7 @@ YAML file templates are available under [`/config/samples`](./config/samples/). ## Uninstall the Operator - To uninstall the operator, the final step consists of deciding whether you want to delete the custom resource definitions (CRDs) and Kubernetes APIServices introduced into the cluster by the operator. 
Choose one of the following options: + To uninstall the operator, the final step consists of deciding whether you want to delete the custom resource definitions (CRDs) and Kubernetes `APIServices` introduced into the cluster by the operator. Choose one of the following options: * ### Delete the CRDs and APIServices @@ -226,7 +254,7 @@ This project welcomes contributions from the community. Before submitting a pull ## Help -You can submit a GitHub issue, oir submit an issue and then file an [Oracle Support service](https://support.oracle.com/portal/) request. To file an issue or a service request, use the following product ID: 14430. +You can submit a GitHub issue, or submit an issue and then file an [Oracle Support service](https://support.oracle.com/portal/) request. To file an issue or a service request, use the following product ID: 14430. ## Security @@ -243,9 +271,9 @@ The following is an example of a YAML file fragment for specifying Oracle Cloud ociSecretOCID: ocid1.vaultsecret.oc1... ``` -Examples in this repository where passwords are entered on the command line are for demonstration purposes only. +Examples in this repository where passwords are entered on the command line are for demonstration purposes only. ## License -Copyright (c) 2022, 2024 Oracle and/or its affiliates. +Copyright (c) 2022, 2025 Oracle and/or its affiliates. 
Released under the Universal Permissive License v1.0 as shown at [https://oss.oracle.com/licenses/upl/](https://oss.oracle.com/licenses/upl/) diff --git a/THIRD_PARTY_LICENSES.txt b/THIRD_PARTY_LICENSES.txt index d75f3946..14e4308f 100644 --- a/THIRD_PARTY_LICENSES.txt +++ b/THIRD_PARTY_LICENSES.txt @@ -1,11 +1,11 @@ ------------------------------------- -Operator SDK 1.32.0 +Operator SDK 1.37.0 https://github.com/operator-framework/operator-sdk Apache 2.0 ------------------------------------- -Apache License: +Apache License: Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -206,13 +206,13 @@ Apache License: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - + ------------------------------ - GO lang 1.21.4 + GO lang 1.23.3 https://github.com/golang - Copyright (c) 2009 The Go Authors. + Copyright (c) 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -241,22 +241,20 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------------------------- -apimachinery 0.28.4 +apimachinery 0.31.3 https://github.com/kubernetes/apimachinery/tr Apache 2.0 ------------------------- -controller-runtime 0.16.3 +controller-runtime 0.19.3 https://github.com/kubernetes-sigs/controller-runtime/releases/tag/v0.16.3 Apache 2.0 ------------------------- -golang 1.21.4 +golang 1.23.3 https://github.com/golang/go/releases/tag/go1.21.4 -BSD 2-clause or 3-clause - -BSD 2-clause or 3-clause License: +BSD 2-clause or 3-clause License: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -1008,23 +1006,23 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. limitations under the License. 
-------------------------- -Logr 1.3.0 +Logr 1.4.2 https://pkg.go.dev/github.com/go-logr/logr https://github.com/go-logr/logr/tree/v1.3.0 Apache 2.0 License ------------------------- -OCI Go SDK 65.53.0 +OCI Go SDK 65.77.1 https://github.com/oracle/oci-go-sdk/releases/tag/v65.53.0 Dual-License: UPL + Apache 2.0 -UPL license: +UPL license: -Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. -This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 +Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. +This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl -or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. +or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. Copyright (c) 2019, 2020 Oracle and/or its affiliates. @@ -1065,58 +1063,58 @@ The Universal Permissive License (UPL), Version 1.0 ------------------------- -ginkgo 2.13.1 +ginkgo 2.202. 
https://github.com/onsi/ginkgo/releases/tag/v2.13.1 MIT ------------------------------------ Gomega github.com/onsi/gomega -MIT License +MIT License Copyright (c) 2013-2014 Onsi Fakhouri ---------------------------- -gomega 1.30.0 +gomega 1.34.2 http://onsi.github.io/gomega/ MIT ------------------------- -Kubernetes api 0.28.4 +Kubernetes api 0.31.3 https://pkg.go.dev/k8s.io/api Apache 2.0 ---------------------------------- -Kubernetes apimachinery 0.28.4 +Kubernetes apimachinery 0.31.3 https://pkg.go.dev/k8s.io/apimachinery Apache 2.0 ----------------------------------- -Kubernetes client-go 0.28.4 +Kubernetes client-go 0.31.3 https://pkg.go.dev/k8s.io/client-go Apache 2.0 ------------------------------------- -Kubernetes controller-runtime project 0.16.3 +Kubernetes controller-runtime project 0.19.3 https://pkg.go.dev/sigs.k8s.io/controller-runtime Apache 2.0 ------------------------------------ kubernetes-sigs/yaml 1.4.0 https://github.com/kubernetes-sigs/yaml/tree/v1.3.0 -MIT +MIT ------------------------- -OCI SDK for Go 65.53.0 +OCI SDK for Go 65.77.1 https://github.com/oracle/oci-go-sdk Multiple Licenses: Apache 2.0, UPL ------------------------------ -Operator Lifecycle Manager (OLM) +Operator Lifecycle Manager (OLM) 0.30.0 github.com/operator-framework/operator-lifecycle-manager Apache 2.0 ------------------------------------ -Prometheus Operator 0.65.2 +Prometheus Operator 0.78.2 https://github.com/prometheus-operator/prometheus-operator Apache 2.0 @@ -1135,8 +1133,8 @@ https://pkg.go.dev/sigs.k8s.io/yaml Dual license: BSD-3-Clause, MIT ------------------------------------ -zap 1.26.0 -https://github.com/uber-go/zap/releases/tag/v1.26.0 +zap 1.27.0 +https://github.com/uber-go/zap/releases/tag/v1.27.0 MIT ------------------------------------ @@ -1163,7 +1161,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -Copyright (c) 2012 The Go Authors. +Copyright (c) 2012 The Go Authors. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -1192,9 +1190,9 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------------------------ -Ginkgo 2.13.1 +Ginkgo 2.20.2 github.com/onsi/ginkgo -MIT License +MIT License Copyright (c) 2013-2014 Onsi Fakhouri Permission is hereby granted, free of charge, to any person obtaining @@ -1216,4 +1214,3 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------- - diff --git a/apis/database/v1alpha1/adbfamily_common_spec.go b/apis/database/v1alpha1/adbfamily_common_spec.go new file mode 100644 index 00000000..74eb9f94 --- /dev/null +++ b/apis/database/v1alpha1/adbfamily_common_spec.go @@ -0,0 +1,67 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +// LastSuccessfulSpec is an annotation key which maps to the value of last successful spec +const LastSuccessfulSpec string = "lastSuccessfulSpec" + +/************************ +* OCI config +************************/ +type OciConfigSpec struct { + ConfigMapName *string `json:"configMapName,omitempty"` + SecretName *string `json:"secretName,omitempty"` +} + +/************************ +* ADB spec +************************/ +type K8sAdbSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciAdbSpec struct { + Ocid *string `json:"ocid,omitempty"` +} + +// TargetSpec defines the spec of the target for backup/restore runs. +type TargetSpec struct { + K8sAdb K8sAdbSpec `json:"k8sADB,omitempty"` + OciAdb OciAdbSpec `json:"ociADB,omitempty"` +} diff --git a/apis/database/v1alpha1/autonomouscontainerdatabase_types.go b/apis/database/v1alpha1/autonomouscontainerdatabase_types.go index abf0ee0d..fd71b210 100644 --- a/apis/database/v1alpha1/autonomouscontainerdatabase_types.go +++ b/apis/database/v1alpha1/autonomouscontainerdatabase_types.go @@ -42,6 +42,8 @@ import ( "encoding/json" "reflect" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + "github.com/oracle/oci-go-sdk/v65/database" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -60,6 +62,17 @@ const ( AcdActionTerminate AcdActionEnum = "TERMINATE" ) +func GetAcdActionEnumFromString(val string) (AcdActionEnum, bool) { + var mappingAcdActionEnum = map[string]AcdActionEnum{ + "RESTART": AcdActionRestart, + "TERMINATE": AcdActionTerminate, + "": AcdActionBlank, + } + + enum, ok := mappingAcdActionEnum[val] + return enum, ok +} + // AutonomousContainerDatabaseSpec defines the desired state of AutonomousContainerDatabase type AutonomousContainerDatabaseSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster @@ -74,7 +87,7 @@ type AutonomousContainerDatabaseSpec struct { Action AcdActionEnum `json:"action,omitempty"` FreeformTags map[string]string 
`json:"freeformTags,omitempty"` - OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` // +kubebuilder:default:=false HardLink *bool `json:"hardLink,omitempty"` } @@ -157,13 +170,13 @@ func (acd *AutonomousContainerDatabase) UpdateLastSuccessfulSpec() error { } // UpdateStatusFromOCIACD updates the status subresource -func (acd *AutonomousContainerDatabase) UpdateStatusFromOCIACD(ociObj database.AutonomousContainerDatabase) { +func (acd *AutonomousContainerDatabase) UpdateStatusFromOciAcd(ociObj database.AutonomousContainerDatabase) { acd.Status.LifecycleState = ociObj.LifecycleState - acd.Status.TimeCreated = FormatSDKTime(ociObj.TimeCreated) + acd.Status.TimeCreated = dbv4.FormatSDKTime(ociObj.TimeCreated) } // UpdateFromOCIADB updates the attributes using database.AutonomousContainerDatabase object -func (acd *AutonomousContainerDatabase) UpdateFromOCIACD(ociObj database.AutonomousContainerDatabase) (specChanged bool) { +func (acd *AutonomousContainerDatabase) UpdateFromOciAcd(ociObj database.AutonomousContainerDatabase) (specChanged bool) { oldACD := acd.DeepCopy() /*********************************** @@ -186,14 +199,14 @@ func (acd *AutonomousContainerDatabase) UpdateFromOCIACD(ociObj database.Autonom /*********************************** * update the status subresource ***********************************/ - acd.UpdateStatusFromOCIACD(ociObj) + acd.UpdateStatusFromOciAcd(ociObj) return !reflect.DeepEqual(oldACD.Spec, acd.Spec) } // RemoveUnchangedSpec removes the unchanged fields in spec, and returns if the spec has been changed. 
func (acd *AutonomousContainerDatabase) RemoveUnchangedSpec(prevSpec AutonomousContainerDatabaseSpec) (bool, error) { - changed, err := removeUnchangedFields(prevSpec, &acd.Spec) + changed, err := dbv4.RemoveUnchangedFields(prevSpec, &acd.Spec) if err != nil { return changed, err } diff --git a/apis/database/v1alpha1/autonomouscontainerdatabase_webhook.go b/apis/database/v1alpha1/autonomouscontainerdatabase_webhook.go index 37e1d819..10a16cd1 100644 --- a/apis/database/v1alpha1/autonomouscontainerdatabase_webhook.go +++ b/apis/database/v1alpha1/autonomouscontainerdatabase_webhook.go @@ -39,6 +39,7 @@ package v1alpha1 import ( + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -58,15 +59,13 @@ func (r *AutonomousContainerDatabase) SetupWebhookWithManager(mgr ctrl.Manager) Complete() } -//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomouscontainerdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomouscontainerdatabases,versions=v1alpha1,name=vautonomouscontainerdatabase.kb.io,admissionReviewVersions={v1} +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomouscontainerdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomouscontainerdatabases,versions=v1alpha1,name=vautonomouscontainerdatabasev1alpha1.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &AutonomousContainerDatabase{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type func (r *AutonomousContainerDatabase) ValidateCreate() (admission.Warnings, error) { autonomouscontainerdatabaselog.Info("validate create", "name", r.Name) - - // TODO(user): fill in your validation logic upon object creation. 
return nil, nil } @@ -84,12 +83,12 @@ func (r *AutonomousContainerDatabase) ValidateUpdate(old runtime.Object) (admiss // cannot update when the old state is in intermediate state, except for the terminate operatrion var copiedSpec *AutonomousContainerDatabaseSpec = r.Spec.DeepCopy() - changed, err := removeUnchangedFields(oldACD.Spec, copiedSpec) + changed, err := dbv4.RemoveUnchangedFields(oldACD.Spec, copiedSpec) if err != nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), err.Error())) } - if IsACDIntermediateState(oldACD.Status.LifecycleState) && changed { + if dbv4.IsACDIntermediateState(oldACD.Status.LifecycleState) && changed { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "cannot change the spec when the lifecycleState is in an intermdeiate state")) diff --git a/apis/database/v1alpha1/autonomousdatabase_conversion.go b/apis/database/v1alpha1/autonomousdatabase_conversion.go new file mode 100644 index 00000000..ffccc181 --- /dev/null +++ b/apis/database/v1alpha1/autonomousdatabase_conversion.go @@ -0,0 +1,371 @@ +package v1alpha1 + +import ( + "errors" + + v4 "github.com/oracle/oracle-database-operator/apis/database/v4" + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +// ConvertTo converts this AutonomousDatabase to the Hub version (v4). 
+func (src *AutonomousDatabase) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v4.AutonomousDatabase) + // Convert the Spec + dst.Spec.Action = src.Spec.Action + + // Details + dst.Spec.Details.Id = src.Spec.Details.Id + dst.Spec.Details.CompartmentId = src.Spec.Details.CompartmentId + dst.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name = src.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name + dst.Spec.Details.AutonomousContainerDatabase.OciAcd.Id = src.Spec.Details.AutonomousContainerDatabase.OciAcd.Id + dst.Spec.Details.DisplayName = src.Spec.Details.DisplayName + dst.Spec.Details.DbName = src.Spec.Details.DbName + dst.Spec.Details.DbWorkload = src.Spec.Details.DbWorkload + dst.Spec.Details.LicenseModel = src.Spec.Details.LicenseModel + dst.Spec.Details.DbVersion = src.Spec.Details.DbVersion + dst.Spec.Details.DataStorageSizeInTBs = src.Spec.Details.DataStorageSizeInTBs + dst.Spec.Details.CpuCoreCount = src.Spec.Details.CpuCoreCount + dst.Spec.Details.ComputeModel = src.Spec.Details.ComputeModel + dst.Spec.Details.ComputeCount = src.Spec.Details.ComputeCount + dst.Spec.Details.OcpuCount = src.Spec.Details.OcpuCount + dst.Spec.Details.AdminPassword.K8sSecret.Name = src.Spec.Details.AdminPassword.K8sSecret.Name + dst.Spec.Details.AdminPassword.OciSecret.Id = src.Spec.Details.AdminPassword.OciSecret.Id + dst.Spec.Details.IsAutoScalingEnabled = src.Spec.Details.IsAutoScalingEnabled + dst.Spec.Details.IsDedicated = src.Spec.Details.IsDedicated + dst.Spec.Details.IsFreeTier = src.Spec.Details.IsFreeTier + dst.Spec.Details.IsAccessControlEnabled = src.Spec.Details.IsAccessControlEnabled + dst.Spec.Details.WhitelistedIps = src.Spec.Details.WhitelistedIps + dst.Spec.Details.SubnetId = src.Spec.Details.SubnetId + dst.Spec.Details.NsgIds = src.Spec.Details.NsgIds + dst.Spec.Details.PrivateEndpointLabel = src.Spec.Details.PrivateEndpointLabel + dst.Spec.Details.IsMtlsConnectionRequired = src.Spec.Details.IsMtlsConnectionRequired + 
dst.Spec.Details.FreeformTags = src.Spec.Details.FreeformTags + + // Clone + dst.Spec.Clone.CompartmentId = src.Spec.Clone.CompartmentId + dst.Spec.Clone.AutonomousContainerDatabase.K8sAcd.Name = src.Spec.Clone.AutonomousContainerDatabase.K8sAcd.Name + dst.Spec.Clone.AutonomousContainerDatabase.OciAcd.Id = src.Spec.Clone.AutonomousContainerDatabase.OciAcd.Id + dst.Spec.Clone.DisplayName = src.Spec.Clone.DisplayName + dst.Spec.Clone.DbName = src.Spec.Clone.DbName + dst.Spec.Clone.DbWorkload = src.Spec.Clone.DbWorkload + dst.Spec.Clone.LicenseModel = src.Spec.Clone.LicenseModel + dst.Spec.Clone.DbVersion = src.Spec.Clone.DbVersion + dst.Spec.Clone.DataStorageSizeInTBs = src.Spec.Clone.DataStorageSizeInTBs + dst.Spec.Clone.CpuCoreCount = src.Spec.Clone.CpuCoreCount + dst.Spec.Clone.ComputeModel = src.Spec.Clone.ComputeModel + dst.Spec.Clone.ComputeCount = src.Spec.Clone.ComputeCount + dst.Spec.Clone.OcpuCount = src.Spec.Clone.OcpuCount + dst.Spec.Clone.AdminPassword.K8sSecret.Name = src.Spec.Clone.AdminPassword.K8sSecret.Name + dst.Spec.Clone.AdminPassword.OciSecret.Id = src.Spec.Clone.AdminPassword.OciSecret.Id + dst.Spec.Clone.IsAutoScalingEnabled = src.Spec.Clone.IsAutoScalingEnabled + dst.Spec.Clone.IsDedicated = src.Spec.Clone.IsDedicated + dst.Spec.Clone.IsFreeTier = src.Spec.Clone.IsFreeTier + dst.Spec.Clone.IsAccessControlEnabled = src.Spec.Clone.IsAccessControlEnabled + dst.Spec.Clone.WhitelistedIps = src.Spec.Clone.WhitelistedIps + dst.Spec.Clone.SubnetId = src.Spec.Clone.SubnetId + dst.Spec.Clone.NsgIds = src.Spec.Clone.NsgIds + dst.Spec.Clone.PrivateEndpointLabel = src.Spec.Clone.PrivateEndpointLabel + dst.Spec.Clone.IsMtlsConnectionRequired = src.Spec.Clone.IsMtlsConnectionRequired + dst.Spec.Clone.FreeformTags = src.Spec.Clone.FreeformTags + dst.Spec.Clone.CloneType = src.Spec.Clone.CloneType + + // Wallet + dst.Spec.Wallet.Name = src.Spec.Wallet.Name + dst.Spec.Wallet.Password.K8sSecret.Name = src.Spec.Wallet.Password.K8sSecret.Name + 
dst.Spec.Wallet.Password.OciSecret.Id = src.Spec.Wallet.Password.OciSecret.Id + + dst.Spec.OciConfig.ConfigMapName = src.Spec.OciConfig.ConfigMapName + dst.Spec.OciConfig.SecretName = src.Spec.OciConfig.SecretName + + dst.Spec.HardLink = src.Spec.HardLink + + // Convert the Status + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.TimeCreated = src.Status.TimeCreated + dst.Status.WalletExpiringDate = src.Status.WalletExpiringDate + + // convert status.allConnectionStrings + if src.Status.AllConnectionStrings != nil { + for _, srcProfile := range src.Status.AllConnectionStrings { + dstProfile := v4.ConnectionStringProfile{} + + // convert status.allConnectionStrings[i].tlsAuthentication + if val, ok := v4.GetTLSAuthenticationEnumFromString(string(srcProfile.TLSAuthentication)); !ok { + return errors.New("Unable to convert to TLSAuthenticationEnum: " + string(srcProfile.TLSAuthentication)) + } else { + dstProfile.TLSAuthentication = val + } + + // convert status.allConnectionStrings[i].connectionStrings + dstProfile.ConnectionStrings = make([]v4.ConnectionStringSpec, len(srcProfile.ConnectionStrings)) + for i, v := range srcProfile.ConnectionStrings { + dstProfile.ConnectionStrings[i].TNSName = v.TNSName + dstProfile.ConnectionStrings[i].ConnectionString = v.ConnectionString + } + + dst.Status.AllConnectionStrings = append(dst.Status.AllConnectionStrings, dstProfile) + } + } + + dst.Status.Conditions = src.Status.Conditions + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +// ConvertFrom converts from the Hub version (v4) to v1alpha1 +func (dst *AutonomousDatabase) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v4.AutonomousDatabase) + + // Convert the Spec + dst.Spec.Action = src.Spec.Action + + // Details + dst.Spec.Details.Id = src.Spec.Details.Id + dst.Spec.Details.CompartmentId = src.Spec.Details.CompartmentId + dst.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name = src.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name 
+ dst.Spec.Details.AutonomousContainerDatabase.OciAcd.Id = src.Spec.Details.AutonomousContainerDatabase.OciAcd.Id + dst.Spec.Details.DisplayName = src.Spec.Details.DisplayName + dst.Spec.Details.DbName = src.Spec.Details.DbName + dst.Spec.Details.DbWorkload = src.Spec.Details.DbWorkload + dst.Spec.Details.LicenseModel = src.Spec.Details.LicenseModel + dst.Spec.Details.DbVersion = src.Spec.Details.DbVersion + dst.Spec.Details.DataStorageSizeInTBs = src.Spec.Details.DataStorageSizeInTBs + dst.Spec.Details.CpuCoreCount = src.Spec.Details.CpuCoreCount + dst.Spec.Details.ComputeModel = src.Spec.Details.ComputeModel + dst.Spec.Details.ComputeCount = src.Spec.Details.ComputeCount + dst.Spec.Details.OcpuCount = src.Spec.Details.OcpuCount + dst.Spec.Details.AdminPassword.K8sSecret.Name = src.Spec.Details.AdminPassword.K8sSecret.Name + dst.Spec.Details.AdminPassword.OciSecret.Id = src.Spec.Details.AdminPassword.OciSecret.Id + dst.Spec.Details.IsAutoScalingEnabled = src.Spec.Details.IsAutoScalingEnabled + dst.Spec.Details.IsDedicated = src.Spec.Details.IsDedicated + dst.Spec.Details.IsFreeTier = src.Spec.Details.IsFreeTier + dst.Spec.Details.IsAccessControlEnabled = src.Spec.Details.IsAccessControlEnabled + dst.Spec.Details.WhitelistedIps = src.Spec.Details.WhitelistedIps + dst.Spec.Details.SubnetId = src.Spec.Details.SubnetId + dst.Spec.Details.NsgIds = src.Spec.Details.NsgIds + dst.Spec.Details.PrivateEndpointLabel = src.Spec.Details.PrivateEndpointLabel + dst.Spec.Details.IsMtlsConnectionRequired = src.Spec.Details.IsMtlsConnectionRequired + dst.Spec.Details.FreeformTags = src.Spec.Details.FreeformTags + + // Clone + dst.Spec.Clone.CompartmentId = src.Spec.Clone.CompartmentId + dst.Spec.Clone.AutonomousContainerDatabase.K8sAcd.Name = src.Spec.Clone.AutonomousContainerDatabase.K8sAcd.Name + dst.Spec.Clone.AutonomousContainerDatabase.OciAcd.Id = src.Spec.Clone.AutonomousContainerDatabase.OciAcd.Id + dst.Spec.Clone.DisplayName = src.Spec.Clone.DisplayName + 
dst.Spec.Clone.DbName = src.Spec.Clone.DbName + dst.Spec.Clone.DbWorkload = src.Spec.Clone.DbWorkload + dst.Spec.Clone.LicenseModel = src.Spec.Clone.LicenseModel + dst.Spec.Clone.DbVersion = src.Spec.Clone.DbVersion + dst.Spec.Clone.DataStorageSizeInTBs = src.Spec.Clone.DataStorageSizeInTBs + dst.Spec.Clone.CpuCoreCount = src.Spec.Clone.CpuCoreCount + dst.Spec.Clone.ComputeModel = src.Spec.Clone.ComputeModel + dst.Spec.Clone.ComputeCount = src.Spec.Clone.ComputeCount + dst.Spec.Clone.OcpuCount = src.Spec.Clone.OcpuCount + dst.Spec.Clone.AdminPassword.K8sSecret.Name = src.Spec.Clone.AdminPassword.K8sSecret.Name + dst.Spec.Clone.AdminPassword.OciSecret.Id = src.Spec.Clone.AdminPassword.OciSecret.Id + dst.Spec.Clone.IsAutoScalingEnabled = src.Spec.Clone.IsAutoScalingEnabled + dst.Spec.Clone.IsDedicated = src.Spec.Clone.IsDedicated + dst.Spec.Clone.IsFreeTier = src.Spec.Clone.IsFreeTier + dst.Spec.Clone.IsAccessControlEnabled = src.Spec.Clone.IsAccessControlEnabled + dst.Spec.Clone.WhitelistedIps = src.Spec.Clone.WhitelistedIps + dst.Spec.Clone.SubnetId = src.Spec.Clone.SubnetId + dst.Spec.Clone.NsgIds = src.Spec.Clone.NsgIds + dst.Spec.Clone.PrivateEndpointLabel = src.Spec.Clone.PrivateEndpointLabel + dst.Spec.Clone.IsMtlsConnectionRequired = src.Spec.Clone.IsMtlsConnectionRequired + dst.Spec.Clone.FreeformTags = src.Spec.Clone.FreeformTags + dst.Spec.Clone.CloneType = src.Spec.Clone.CloneType + + // Wallet + dst.Spec.Wallet.Name = src.Spec.Wallet.Name + dst.Spec.Wallet.Password.K8sSecret.Name = src.Spec.Wallet.Password.K8sSecret.Name + dst.Spec.Wallet.Password.OciSecret.Id = src.Spec.Wallet.Password.OciSecret.Id + + dst.Spec.OciConfig.ConfigMapName = src.Spec.OciConfig.ConfigMapName + dst.Spec.OciConfig.SecretName = src.Spec.OciConfig.SecretName + + dst.Spec.HardLink = src.Spec.HardLink + + // Convert the Status + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.TimeCreated = src.Status.TimeCreated + dst.Status.WalletExpiringDate = 
src.Status.WalletExpiringDate + + // convert status.allConnectionStrings + if src.Status.AllConnectionStrings != nil { + for _, srcProfile := range src.Status.AllConnectionStrings { + dstProfile := ConnectionStringProfile{} + + // convert status.allConnectionStrings[i].tlsAuthentication + if val, ok := GetTLSAuthenticationEnumFromString(string(srcProfile.TLSAuthentication)); !ok { + return errors.New("Unable to convert to TLSAuthenticationEnum: " + string(srcProfile.TLSAuthentication)) + } else { + dstProfile.TLSAuthentication = val + } + + // convert status.allConnectionStrings[i].connectionStrings + dstProfile.ConnectionStrings = make([]ConnectionStringSpec, len(srcProfile.ConnectionStrings)) + for i, v := range srcProfile.ConnectionStrings { + dstProfile.ConnectionStrings[i].TNSName = v.TNSName + dstProfile.ConnectionStrings[i].ConnectionString = v.ConnectionString + } + + dst.Status.AllConnectionStrings = append(dst.Status.AllConnectionStrings, dstProfile) + } + } + + dst.Status.Conditions = src.Status.Conditions + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (src *AutonomousDatabaseBackup) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v4.AutonomousDatabaseBackup) + + dst.Spec.Target.K8sAdb.Name = src.Spec.Target.K8sAdb.Name + dst.Spec.Target.OciAdb.OCID = src.Spec.Target.OciAdb.Ocid + dst.Spec.DisplayName = src.Spec.DisplayName + dst.Spec.AutonomousDatabaseBackupOCID = src.Spec.AutonomousDatabaseBackupOCID + dst.Spec.IsLongTermBackup = src.Spec.IsLongTermBackup + dst.Spec.RetentionPeriodInDays = src.Spec.RetentionPeriodInDays + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.Type = src.Status.Type + dst.Status.IsAutomatic = src.Status.IsAutomatic + dst.Status.TimeStarted = src.Status.TimeStarted + dst.Status.TimeEnded = src.Status.TimeEnded + dst.Status.AutonomousDatabaseOCID = 
src.Status.AutonomousDatabaseOCID + dst.Status.CompartmentOCID = src.Status.CompartmentOCID + dst.Status.DBName = src.Status.DBName + dst.Status.DBDisplayName = src.Status.DBDisplayName + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (dst *AutonomousDatabaseBackup) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v4.AutonomousDatabaseBackup) + + dst.Spec.Target.K8sAdb.Name = src.Spec.Target.K8sAdb.Name + dst.Spec.Target.OciAdb.Ocid = src.Spec.Target.OciAdb.OCID + dst.Spec.DisplayName = src.Spec.DisplayName + dst.Spec.AutonomousDatabaseBackupOCID = src.Spec.AutonomousDatabaseBackupOCID + dst.Spec.IsLongTermBackup = src.Spec.IsLongTermBackup + dst.Spec.RetentionPeriodInDays = src.Spec.RetentionPeriodInDays + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.Type = src.Status.Type + dst.Status.IsAutomatic = src.Status.IsAutomatic + dst.Status.TimeStarted = src.Status.TimeStarted + dst.Status.TimeEnded = src.Status.TimeEnded + dst.Status.AutonomousDatabaseOCID = src.Status.AutonomousDatabaseOCID + dst.Status.CompartmentOCID = src.Status.CompartmentOCID + dst.Status.DBName = src.Status.DBName + dst.Status.DBDisplayName = src.Status.DBDisplayName + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (src *AutonomousDatabaseRestore) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v4.AutonomousDatabaseRestore) + + dst.Spec.Target.K8sAdb.Name = src.Spec.Target.K8sAdb.Name + dst.Spec.Target.OciAdb.OCID = src.Spec.Target.OciAdb.Ocid + dst.Spec.Source.K8sAdbBackup.Name = src.Spec.Source.K8sAdbBackup.Name + dst.Spec.Source.PointInTime.Timestamp = src.Spec.Source.PointInTime.Timestamp + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + + dst.Status.DisplayName = src.Status.DisplayName + 
dst.Status.TimeAccepted = src.Status.TimeAccepted + dst.Status.TimeStarted = src.Status.TimeStarted + dst.Status.TimeEnded = src.Status.TimeEnded + dst.Status.DbName = src.Status.DbName + dst.Status.WorkRequestOCID = src.Status.WorkRequestOCID + dst.Status.Status = src.Status.Status + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (dst *AutonomousDatabaseRestore) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v4.AutonomousDatabaseRestore) + + dst.Spec.Target.K8sAdb.Name = src.Spec.Target.K8sAdb.Name + dst.Spec.Target.OciAdb.Ocid = src.Spec.Target.OciAdb.OCID + dst.Spec.Source.K8sAdbBackup.Name = src.Spec.Source.K8sAdbBackup.Name + dst.Spec.Source.PointInTime.Timestamp = src.Spec.Source.PointInTime.Timestamp + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + + dst.Status.DisplayName = src.Status.DisplayName + dst.Status.TimeAccepted = src.Status.TimeAccepted + dst.Status.TimeStarted = src.Status.TimeStarted + dst.Status.TimeEnded = src.Status.TimeEnded + dst.Status.DbName = src.Status.DbName + dst.Status.WorkRequestOCID = src.Status.WorkRequestOCID + dst.Status.Status = src.Status.Status + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (src *AutonomousContainerDatabase) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v4.AutonomousContainerDatabase) + + dst.Spec.AutonomousContainerDatabaseOCID = src.Spec.AutonomousContainerDatabaseOCID + dst.Spec.CompartmentOCID = src.Spec.CompartmentOCID + dst.Spec.DisplayName = src.Spec.DisplayName + dst.Spec.AutonomousExadataVMClusterOCID = src.Spec.AutonomousExadataVMClusterOCID + dst.Spec.PatchModel = src.Spec.PatchModel + + if val, ok := v4.GetAcdActionEnumFromString(string(src.Spec.Action)); !ok { + return errors.New("Unable to convert to AcdActionEnum: " + string(src.Spec.Action)) + } else { + dst.Spec.Action = val + } + + dst.Spec.FreeformTags = src.Spec.FreeformTags + 
dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + dst.Spec.HardLink = src.Spec.HardLink + + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.TimeCreated = src.Status.TimeCreated + + dst.ObjectMeta = src.ObjectMeta + return nil +} + +func (dst *AutonomousContainerDatabase) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v4.AutonomousContainerDatabase) + + dst.Spec.AutonomousContainerDatabaseOCID = src.Spec.AutonomousContainerDatabaseOCID + dst.Spec.CompartmentOCID = src.Spec.CompartmentOCID + dst.Spec.DisplayName = src.Spec.DisplayName + dst.Spec.AutonomousExadataVMClusterOCID = src.Spec.AutonomousExadataVMClusterOCID + dst.Spec.PatchModel = src.Spec.PatchModel + + if val, ok := GetAcdActionEnumFromString(string(src.Spec.Action)); !ok { + return errors.New("Unable to convert to AcdActionEnum: " + string(src.Spec.Action)) + } else { + dst.Spec.Action = val + } + + dst.Spec.FreeformTags = src.Spec.FreeformTags + dst.Spec.OCIConfig.ConfigMapName = src.Spec.OCIConfig.ConfigMapName + dst.Spec.OCIConfig.SecretName = src.Spec.OCIConfig.SecretName + dst.Spec.HardLink = src.Spec.HardLink + + dst.Status.LifecycleState = src.Status.LifecycleState + dst.Status.TimeCreated = src.Status.TimeCreated + + dst.ObjectMeta = src.ObjectMeta + return nil +} diff --git a/apis/database/v1alpha1/autonomousdatabase_types.go b/apis/database/v1alpha1/autonomousdatabase_types.go index cd23b3f3..099703c2 100644 --- a/apis/database/v1alpha1/autonomousdatabase_types.go +++ b/apis/database/v1alpha1/autonomousdatabase_types.go @@ -39,9 +39,6 @@ package v1alpha1 import ( - "encoding/json" - "reflect" - "github.com/oracle/oci-go-sdk/v65/database" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -49,34 +46,79 @@ import ( // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
-// name of our custom finalizer -const ADB_FINALIZER = "database.oracle.com/adb-finalizer" - // AutonomousDatabaseSpec defines the desired state of AutonomousDatabase // Important: Run "make" to regenerate code after modifying this file type AutonomousDatabaseSpec struct { - Details AutonomousDatabaseDetails `json:"details"` - OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + // +kubebuilder:validation:Enum:="";Create;Sync;Update;Stop;Start;Terminate;Clone + Action string `json:"action"` + Details AutonomousDatabaseDetails `json:"details,omitempty"` + Clone AutonomousDatabaseClone `json:"clone,omitempty"` + Wallet WalletSpec `json:"wallet,omitempty"` + OciConfig OciConfigSpec `json:"ociConfig,omitempty"` // +kubebuilder:default:=false HardLink *bool `json:"hardLink,omitempty"` } +type AutonomousDatabaseDetails struct { + AutonomousDatabaseBase `json:",inline"` + Id *string `json:"id,omitempty"` +} + +type AutonomousDatabaseClone struct { + AutonomousDatabaseBase `json:",inline"` + // +kubebuilder:validation:Enum:="FULL";"METADATA" + CloneType database.CreateAutonomousDatabaseCloneDetailsCloneTypeEnum `json:"cloneType,omitempty"` +} + +// AutonomousDatabaseBase defines the detail information of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase +type AutonomousDatabaseBase struct { + CompartmentId *string `json:"compartmentId,omitempty"` + AutonomousContainerDatabase AcdSpec `json:"autonomousContainerDatabase,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + DbName *string `json:"dbName,omitempty"` + // +kubebuilder:validation:Enum:="OLTP";"DW";"AJD";"APEX" + DbWorkload database.AutonomousDatabaseDbWorkloadEnum `json:"dbWorkload,omitempty"` + // +kubebuilder:validation:Enum:="LICENSE_INCLUDED";"BRING_YOUR_OWN_LICENSE" + LicenseModel database.AutonomousDatabaseLicenseModelEnum `json:"licenseModel,omitempty"` + DbVersion *string `json:"dbVersion,omitempty"` + DataStorageSizeInTBs *int 
`json:"dataStorageSizeInTBs,omitempty"` + CpuCoreCount *int `json:"cpuCoreCount,omitempty"` + // +kubebuilder:validation:Enum:="ECPU";"OCPU" + ComputeModel database.AutonomousDatabaseComputeModelEnum `json:"computeModel,omitempty"` + ComputeCount *float32 `json:"computeCount,omitempty"` + OcpuCount *float32 `json:"ocpuCount,omitempty"` + AdminPassword PasswordSpec `json:"adminPassword,omitempty"` + IsAutoScalingEnabled *bool `json:"isAutoScalingEnabled,omitempty"` + IsDedicated *bool `json:"isDedicated,omitempty"` + IsFreeTier *bool `json:"isFreeTier,omitempty"` + + // NetworkAccess + IsAccessControlEnabled *bool `json:"isAccessControlEnabled,omitempty"` + WhitelistedIps []string `json:"whitelistedIps,omitempty"` + SubnetId *string `json:"subnetId,omitempty"` + NsgIds []string `json:"nsgIds,omitempty"` + PrivateEndpointLabel *string `json:"privateEndpointLabel,omitempty"` + IsMtlsConnectionRequired *bool `json:"isMtlsConnectionRequired,omitempty"` + + FreeformTags map[string]string `json:"freeformTags,omitempty"` +} + /************************ * ACD specs ************************/ -type K8sACDSpec struct { +type K8sAcdSpec struct { Name *string `json:"name,omitempty"` } -type OCIACDSpec struct { - OCID *string `json:"ocid,omitempty"` +type OciAcdSpec struct { + Id *string `json:"id,omitempty"` } -// ACDSpec defines the spec of the target for backup/restore runs. +// AcdSpec defines the spec of the target for backup/restore runs. 
// The name could be the name of an AutonomousDatabase or an AutonomousDatabaseBackup -type ACDSpec struct { - K8sACD K8sACDSpec `json:"k8sACD,omitempty"` - OCIACD OCIACDSpec `json:"ociACD,omitempty"` +type AcdSpec struct { + K8sAcd K8sAcdSpec `json:"k8sAcd,omitempty"` + OciAcd OciAcdSpec `json:"ociAcd,omitempty"` } /************************ @@ -86,13 +128,13 @@ type K8sSecretSpec struct { Name *string `json:"name,omitempty"` } -type OCISecretSpec struct { - OCID *string `json:"ocid,omitempty"` +type OciSecretSpec struct { + Id *string `json:"id,omitempty"` } type PasswordSpec struct { K8sSecret K8sSecretSpec `json:"k8sSecret,omitempty"` - OCISecret OCISecretSpec `json:"ociSecret,omitempty"` + OciSecret OciSecretSpec `json:"ociSecret,omitempty"` } type WalletSpec struct { @@ -100,67 +142,16 @@ type WalletSpec struct { Password PasswordSpec `json:"password,omitempty"` } -/************************ -* Network Access specs -************************/ - -type NetworkAccessTypeEnum string - -const ( - NetworkAccessTypePublic NetworkAccessTypeEnum = "PUBLIC" - NetworkAccessTypeRestricted NetworkAccessTypeEnum = "RESTRICTED" - NetworkAccessTypePrivate NetworkAccessTypeEnum = "PRIVATE" -) - -type NetworkAccessSpec struct { - // +kubebuilder:validation:Enum:="";"PUBLIC";"RESTRICTED";"PRIVATE" - AccessType NetworkAccessTypeEnum `json:"accessType,omitempty"` - IsAccessControlEnabled *bool `json:"isAccessControlEnabled,omitempty"` - AccessControlList []string `json:"accessControlList,omitempty"` - PrivateEndpoint PrivateEndpointSpec `json:"privateEndpoint,omitempty"` - IsMTLSConnectionRequired *bool `json:"isMTLSConnectionRequired,omitempty"` -} - -type PrivateEndpointSpec struct { - SubnetOCID *string `json:"subnetOCID,omitempty"` - NsgOCIDs []string `json:"nsgOCIDs,omitempty"` - HostnamePrefix *string `json:"hostnamePrefix,omitempty"` -} - -// AutonomousDatabaseDetails defines the detail information of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase 
-type AutonomousDatabaseDetails struct { - AutonomousDatabaseOCID *string `json:"autonomousDatabaseOCID,omitempty"` - CompartmentOCID *string `json:"compartmentOCID,omitempty"` - AutonomousContainerDatabase ACDSpec `json:"autonomousContainerDatabase,omitempty"` - DisplayName *string `json:"displayName,omitempty"` - DbName *string `json:"dbName,omitempty"` - // +kubebuilder:validation:Enum:="OLTP";"DW";"AJD";"APEX" - DbWorkload database.AutonomousDatabaseDbWorkloadEnum `json:"dbWorkload,omitempty"` - // +kubebuilder:validation:Enum:="LICENSE_INCLUDED";"BRING_YOUR_OWN_LICENSE" - LicenseModel database.AutonomousDatabaseLicenseModelEnum `json:"licenseModel,omitempty"` - DbVersion *string `json:"dbVersion,omitempty"` - DataStorageSizeInTBs *int `json:"dataStorageSizeInTBs,omitempty"` - CPUCoreCount *int `json:"cpuCoreCount,omitempty"` - AdminPassword PasswordSpec `json:"adminPassword,omitempty"` - IsAutoScalingEnabled *bool `json:"isAutoScalingEnabled,omitempty"` - IsDedicated *bool `json:"isDedicated,omitempty"` - LifecycleState database.AutonomousDatabaseLifecycleStateEnum `json:"lifecycleState,omitempty"` - - NetworkAccess NetworkAccessSpec `json:"networkAccess,omitempty"` - - FreeformTags map[string]string `json:"freeformTags,omitempty"` - - Wallet WalletSpec `json:"wallet,omitempty"` -} - // AutonomousDatabaseStatus defines the observed state of AutonomousDatabase type AutonomousDatabaseStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - LifecycleState database.AutonomousDatabaseLifecycleStateEnum `json:"lifecycleState,omitempty"` - TimeCreated string `json:"timeCreated,omitempty"` - WalletExpiringDate string `json:"walletExpiringDate,omitempty"` - AllConnectionStrings []ConnectionStringProfile `json:"allConnectionStrings,omitempty"` + // Lifecycle State of the ADB + LifecycleState database.AutonomousDatabaseLifecycleStateEnum 
`json:"lifecycleState,omitempty"` + // Creation time of the ADB + TimeCreated string `json:"timeCreated,omitempty"` + // Expiring date of the instance wallet + WalletExpiringDate string `json:"walletExpiringDate,omitempty"` + // Connection Strings of the ADB + AllConnectionStrings []ConnectionStringProfile `json:"allConnectionStrings,omitempty"` // +patchMergeKey=type // +patchStrategy=merge // +listType=map @@ -175,6 +166,16 @@ const ( tlsAuthenticationMTLS TLSAuthenticationEnum = "Mutual TLS" ) +func GetTLSAuthenticationEnumFromString(val string) (TLSAuthenticationEnum, bool) { + var mappingTLSAuthenticationEnum = map[string]TLSAuthenticationEnum{ + "TLS": tlsAuthenticationTLS, + "Mutual TLS": tlsAuthenticationMTLS, + } + + enum, ok := mappingTLSAuthenticationEnum[val] + return enum, ok +} + type ConnectionStringProfile struct { TLSAuthentication TLSAuthenticationEnum `json:"tlsAuthentication,omitempty"` ConnectionStrings []ConnectionStringSpec `json:"connectionStrings"` @@ -217,181 +218,3 @@ type AutonomousDatabaseList struct { func init() { SchemeBuilder.Register(&AutonomousDatabase{}, &AutonomousDatabaseList{}) } - -// GetLastSuccessfulSpec returns spec from the lass successful reconciliation. -// Returns nil, nil if there is no lastSuccessfulSpec. 
-func (adb *AutonomousDatabase) GetLastSuccessfulSpec() (*AutonomousDatabaseSpec, error) { - val, ok := adb.GetAnnotations()[LastSuccessfulSpec] - if !ok { - return nil, nil - } - - specBytes := []byte(val) - sucSpec := AutonomousDatabaseSpec{} - - err := json.Unmarshal(specBytes, &sucSpec) - if err != nil { - return nil, err - } - - return &sucSpec, nil -} - -func (adb *AutonomousDatabase) UpdateLastSuccessfulSpec() error { - specBytes, err := json.Marshal(adb.Spec) - if err != nil { - return err - } - - anns := adb.GetAnnotations() - - if anns == nil { - anns = map[string]string{ - LastSuccessfulSpec: string(specBytes), - } - } else { - anns[LastSuccessfulSpec] = string(specBytes) - } - - adb.SetAnnotations(anns) - - return nil -} - -// UpdateStatusFromOCIADB updates the status subresource -func (adb *AutonomousDatabase) UpdateStatusFromOCIADB(ociObj database.AutonomousDatabase) { - adb.Status.LifecycleState = ociObj.LifecycleState - adb.Status.TimeCreated = FormatSDKTime(ociObj.TimeCreated) - - if *ociObj.IsDedicated { - conns := make([]ConnectionStringSpec, len(ociObj.ConnectionStrings.AllConnectionStrings)) - for key, val := range ociObj.ConnectionStrings.AllConnectionStrings { - conns = append(conns, ConnectionStringSpec{TNSName: key, ConnectionString: val}) - } - - adb.Status.AllConnectionStrings = []ConnectionStringProfile{ - {ConnectionStrings: conns}, - } - } else { - var mTLSConns []ConnectionStringSpec - var tlsConns []ConnectionStringSpec - - var conns []ConnectionStringProfile - - for _, profile := range ociObj.ConnectionStrings.Profiles { - if profile.TlsAuthentication == database.DatabaseConnectionStringProfileTlsAuthenticationMutual { - mTLSConns = append(mTLSConns, ConnectionStringSpec{TNSName: *profile.DisplayName, ConnectionString: *profile.Value}) - } else { - tlsConns = append(tlsConns, ConnectionStringSpec{TNSName: *profile.DisplayName, ConnectionString: *profile.Value}) - } - } - - if len(mTLSConns) > 0 { - conns = append(conns, 
ConnectionStringProfile{ - TLSAuthentication: tlsAuthenticationMTLS, - ConnectionStrings: mTLSConns, - }) - } - - if len(tlsConns) > 0 { - conns = append(conns, ConnectionStringProfile{ - TLSAuthentication: tlsAuthenticationTLS, - ConnectionStrings: tlsConns, - }) - } - - adb.Status.AllConnectionStrings = conns - } -} - -// UpdateFromOCIADB updates the attributes using database.AutonomousDatabase object -func (adb *AutonomousDatabase) UpdateFromOCIADB(ociObj database.AutonomousDatabase) (specChanged bool) { - oldADB := adb.DeepCopy() - - /*********************************** - * update the spec - ***********************************/ - adb.Spec.Details.AutonomousDatabaseOCID = ociObj.Id - adb.Spec.Details.CompartmentOCID = ociObj.CompartmentId - adb.Spec.Details.AutonomousContainerDatabase.OCIACD.OCID = ociObj.AutonomousContainerDatabaseId - adb.Spec.Details.DisplayName = ociObj.DisplayName - adb.Spec.Details.DbName = ociObj.DbName - adb.Spec.Details.DbWorkload = ociObj.DbWorkload - adb.Spec.Details.LicenseModel = ociObj.LicenseModel - adb.Spec.Details.DbVersion = ociObj.DbVersion - adb.Spec.Details.DataStorageSizeInTBs = ociObj.DataStorageSizeInTBs - adb.Spec.Details.CPUCoreCount = ociObj.CpuCoreCount - adb.Spec.Details.IsAutoScalingEnabled = ociObj.IsAutoScalingEnabled - adb.Spec.Details.IsDedicated = ociObj.IsDedicated - adb.Spec.Details.LifecycleState = NextADBStableState(ociObj.LifecycleState) - // Special case: an emtpy map will be nil after unmarshalling while the OCI always returns an emty map. 
- if len(ociObj.FreeformTags) != 0 { - adb.Spec.Details.FreeformTags = ociObj.FreeformTags - } else { - adb.Spec.Details.FreeformTags = nil - } - - // Determine network.accessType - if *ociObj.IsDedicated { - adb.Spec.Details.NetworkAccess.AccessType = NetworkAccessTypePrivate - } else { - if ociObj.NsgIds != nil || ociObj.PrivateEndpoint != nil || ociObj.PrivateEndpointIp != nil || ociObj.PrivateEndpointLabel != nil { - adb.Spec.Details.NetworkAccess.AccessType = NetworkAccessTypePrivate - } else if ociObj.WhitelistedIps != nil { - adb.Spec.Details.NetworkAccess.AccessType = NetworkAccessTypeRestricted - } else { - adb.Spec.Details.NetworkAccess.AccessType = NetworkAccessTypePublic - } - } - - adb.Spec.Details.NetworkAccess.IsAccessControlEnabled = ociObj.IsAccessControlEnabled - if len(ociObj.WhitelistedIps) != 0 { - adb.Spec.Details.NetworkAccess.AccessControlList = ociObj.WhitelistedIps - } else { - adb.Spec.Details.NetworkAccess.AccessControlList = nil - } - adb.Spec.Details.NetworkAccess.IsMTLSConnectionRequired = ociObj.IsMtlsConnectionRequired - adb.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID = ociObj.SubnetId - if len(ociObj.NsgIds) != 0 { - adb.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs = ociObj.NsgIds - } else { - adb.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs = nil - } - adb.Spec.Details.NetworkAccess.PrivateEndpoint.HostnamePrefix = ociObj.PrivateEndpointLabel - - // The admin password is not going to be updated in a bind operation. Erase the field if the lastSucSpec is nil. - // Leave the wallet field as is because the download wallet operation is independent from the update operation. 
- lastSucSpec, _ := adb.GetLastSuccessfulSpec() - if lastSucSpec == nil { - adb.Spec.Details.AdminPassword = PasswordSpec{} - } else { - adb.Spec.Details.AdminPassword = lastSucSpec.Details.AdminPassword - } - - /*********************************** - * update the status subresource - ***********************************/ - adb.UpdateStatusFromOCIADB(ociObj) - - return !reflect.DeepEqual(oldADB.Spec, adb.Spec) -} - -// RemoveUnchangedDetails removes the unchanged fields in spec.details, and returns if the details has been changed. -func (adb *AutonomousDatabase) RemoveUnchangedDetails(prevSpec AutonomousDatabaseSpec) (bool, error) { - - changed, err := removeUnchangedFields(prevSpec.Details, &adb.Spec.Details) - if err != nil { - return changed, err - } - - return changed, nil -} - -// A helper function which is useful for debugging. The function prints out a structural JSON format. -func (adb *AutonomousDatabase) String() (string, error) { - out, err := json.MarshalIndent(adb, "", " ") - if err != nil { - return "", err - } - return string(out), nil -} diff --git a/apis/database/v1alpha1/autonomousdatabase_webhook.go b/apis/database/v1alpha1/autonomousdatabase_webhook.go index b25e8104..e209ae7a 100644 --- a/apis/database/v1alpha1/autonomousdatabase_webhook.go +++ b/apis/database/v1alpha1/autonomousdatabase_webhook.go @@ -39,10 +39,6 @@ package v1alpha1 import ( - "fmt" - - "github.com/oracle/oci-go-sdk/v65/common" - "github.com/oracle/oci-go-sdk/v65/database" dbcommons "github.com/oracle/oracle-database-operator/commons/database" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -63,37 +59,7 @@ func (r *AutonomousDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } 
-//+kubebuilder:webhook:verbs=create;update,path=/mutate-database-oracle-com-v1alpha1-autonomousdatabase,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabases,versions=v1alpha1,name=mautonomousdatabase.kb.io,admissionReviewVersions=v1 - -var _ webhook.Defaulter = &AutonomousDatabase{} - -// Default implements webhook.Defaulter so a webhook will be registered for the type -func (r *AutonomousDatabase) Default() { - autonomousdatabaselog.Info("default", "name", r.Name) - - if !isDedicated(r) { // Shared database - // AccessType is PUBLIC by default - if r.Spec.Details.NetworkAccess.AccessType == NetworkAccessTypePublic { - r.Spec.Details.NetworkAccess.IsMTLSConnectionRequired = common.Bool(true) - r.Spec.Details.NetworkAccess.AccessControlList = nil - r.Spec.Details.NetworkAccess.PrivateEndpoint.HostnamePrefix = nil - r.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs = nil - r.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID = nil - } else if r.Spec.Details.NetworkAccess.AccessType == NetworkAccessTypeRestricted { - r.Spec.Details.NetworkAccess.PrivateEndpoint.HostnamePrefix = nil - r.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs = nil - r.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID = nil - } else if r.Spec.Details.NetworkAccess.AccessType == NetworkAccessTypePrivate { - r.Spec.Details.NetworkAccess.AccessControlList = nil - } - } else { // Dedicated database - // AccessType can only be PRIVATE for a dedicated database - r.Spec.Details.NetworkAccess.AccessType = NetworkAccessTypePrivate - } - -} - -//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomousdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabases,versions=v1alpha1,name=vautonomousdatabase.kb.io,admissionReviewVersions={v1} 
+//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomousdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabases,versions=v1alpha1,name=vautonomousdatabasev1alpha1.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &AutonomousDatabase{} @@ -117,17 +83,6 @@ func (r *AutonomousDatabase) ValidateCreate() (admission.Warnings, error) { } } - if r.Spec.Details.AutonomousDatabaseOCID == nil { // provisioning operation - allErrs = validateCommon(r, allErrs) - allErrs = validateNetworkAccess(r, allErrs) - - if r.Spec.Details.LifecycleState != "" { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec").Child("details").Child("lifecycleState"), - "cannot apply lifecycleState to a provision operation")) - } - } - if len(allErrs) == 0 { return nil, nil } @@ -139,66 +94,43 @@ func (r *AutonomousDatabase) ValidateCreate() (admission.Warnings, error) { // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type func (r *AutonomousDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { var allErrs field.ErrorList - var oldADB *AutonomousDatabase = old.(*AutonomousDatabase) + var oldAdb *AutonomousDatabase = old.(*AutonomousDatabase) autonomousdatabaselog.Info("validate update", "name", r.Name) // skip the update of adding ADB OCID or binding - if oldADB.Status.LifecycleState == "" { - return nil, nil - } + // if oldAdb.Status.LifecycleState == "" { + // return nil, nil + // } // cannot update when the old state is in intermediate, except for the change to the hardLink or the terminate operatrion during valid lifecycleState - var copySpec *AutonomousDatabaseSpec = r.Spec.DeepCopy() - specChanged, err := removeUnchangedFields(oldADB.Spec, copySpec) - if err != nil { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec"), err.Error())) - } + // var copySpec *AutonomousDatabaseSpec = r.Spec.DeepCopy() 
+ // specChanged, err := dbv4.RemoveUnchangedFields(oldAdb.Spec, copySpec) + // if err != nil { + // allErrs = append(allErrs, + // field.Forbidden(field.NewPath("spec"), err.Error())) + // } - hardLinkChanged := copySpec.HardLink != nil + // hardLinkChanged := copySpec.HardLink != nil - terminateOp := ValidADBTerminateState(oldADB.Status.LifecycleState) && copySpec.Details.LifecycleState == database.AutonomousDatabaseLifecycleStateTerminated + // isTerminateOp := dbv4.CanBeTerminated(oldAdb.Status.LifecycleState) && copySpec.Action == "Terminate" - if specChanged && IsADBIntermediateState(oldADB.Status.LifecycleState) && !terminateOp && !hardLinkChanged { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec"), - "cannot change the spec when the lifecycleState is in an intermdeiate state")) - } + // if specChanged && dbv4.IsAdbIntermediateState(oldAdb.Status.LifecycleState) && !isTerminateOp && !hardLinkChanged { + // allErrs = append(allErrs, + // field.Forbidden(field.NewPath("spec"), + // "cannot change the spec when the lifecycleState is in an intermdeiate state")) + // } // cannot modify autonomousDatabaseOCID - if r.Spec.Details.AutonomousDatabaseOCID != nil && - oldADB.Spec.Details.AutonomousDatabaseOCID != nil && - *r.Spec.Details.AutonomousDatabaseOCID != *oldADB.Spec.Details.AutonomousDatabaseOCID { + if r.Spec.Details.Id != nil && + oldAdb.Spec.Details.Id != nil && + *r.Spec.Details.Id != *oldAdb.Spec.Details.Id { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("details").Child("autonomousDatabaseOCID"), "autonomousDatabaseOCID cannot be modified")) } - // cannot change lifecycleState with other fields together (except the oci config) - var lifecycleChanged, otherFieldsChanged bool - - lifecycleChanged = oldADB.Spec.Details.LifecycleState != "" && - r.Spec.Details.LifecycleState != "" && - oldADB.Spec.Details.LifecycleState != r.Spec.Details.LifecycleState - var copiedADB *AutonomousDatabaseSpec = r.Spec.DeepCopy() - 
copiedADB.Details.LifecycleState = oldADB.Spec.Details.LifecycleState - copiedADB.OCIConfig = oldADB.Spec.OCIConfig - - otherFieldsChanged, err = removeUnchangedFields(oldADB.Spec, copiedADB) - if err != nil { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec"), err.Error())) - } - - if lifecycleChanged && otherFieldsChanged { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec").Child("details").Child("LifecycleState"), - "cannot change lifecycleState with other spec attributes at the same time")) - } - allErrs = validateCommon(r, allErrs) - allErrs = validateNetworkAccess(r, allErrs) if len(allErrs) == 0 { return nil, nil @@ -210,13 +142,13 @@ func (r *AutonomousDatabase) ValidateUpdate(old runtime.Object) (admission.Warni func validateCommon(adb *AutonomousDatabase, allErrs field.ErrorList) field.ErrorList { // password - if adb.Spec.Details.AdminPassword.K8sSecret.Name != nil && adb.Spec.Details.AdminPassword.OCISecret.OCID != nil { + if adb.Spec.Details.AdminPassword.K8sSecret.Name != nil && adb.Spec.Details.AdminPassword.OciSecret.Id != nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("details").Child("adminPassword"), "cannot apply k8sSecret.name and ociSecret.ocid at the same time")) } - if adb.Spec.Details.Wallet.Password.K8sSecret.Name != nil && adb.Spec.Details.Wallet.Password.OCISecret.OCID != nil { + if adb.Spec.Wallet.Password.K8sSecret.Name != nil && adb.Spec.Wallet.Password.OciSecret.Id != nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("details").Child("wallet").Child("password"), "cannot apply k8sSecret.name and ociSecret.ocid at the same time")) @@ -225,69 +157,15 @@ func validateCommon(adb *AutonomousDatabase, allErrs field.ErrorList) field.Erro return allErrs } -func validateNetworkAccess(adb *AutonomousDatabase, allErrs field.ErrorList) field.ErrorList { - if !isDedicated(adb) { - // Shared database - if adb.Spec.Details.NetworkAccess.AccessType == 
NetworkAccessTypeRestricted { - if adb.Spec.Details.NetworkAccess.AccessControlList == nil { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec").Child("details").Child("networkAccess").Child("accessControlList"), - fmt.Sprintf("accessControlList cannot be empty when the network access type is %s", NetworkAccessTypeRestricted))) - } - } else if adb.Spec.Details.NetworkAccess.AccessType == NetworkAccessTypePrivate { // the accessType is PRIVATE - if adb.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID == nil { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec").Child("details").Child("networkAccess").Child("privateEndpoint").Child("subnetOCID"), - fmt.Sprintf("subnetOCID cannot be empty when the network access type is %s", NetworkAccessTypePrivate))) - } - } - - // NsgOCIDs only applies to PRIVATE accessType - if adb.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs != nil && adb.Spec.Details.NetworkAccess.AccessType != NetworkAccessTypePrivate { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec").Child("details").Child("networkAccess").Child("privateEndpoint").Child("nsgOCIDs"), - fmt.Sprintf("NsgOCIDs cannot only be applied when network access type is %s.", NetworkAccessTypePrivate))) - } - - // IsAccessControlEnabled is not applicable to a shared database - if adb.Spec.Details.NetworkAccess.IsAccessControlEnabled != nil { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec").Child("details").Child("networkAccess").Child("IsAccessControlEnabled"), - "isAccessControlEnabled is not applicable on a shared Autonomous Database")) - } - } else { - // Dedicated database - - // accessControlList cannot be provided when Autonomous Database's access control is disabled - if adb.Spec.Details.NetworkAccess.AccessControlList != nil && - (adb.Spec.Details.NetworkAccess.IsAccessControlEnabled == nil || !*adb.Spec.Details.NetworkAccess.IsAccessControlEnabled) { - allErrs = append(allErrs, - 
field.Forbidden(field.NewPath("spec").Child("details").Child("networkAccess").Child("accessControlList"), - "access control list cannot be provided when Autonomous Database's access control is disabled")) - } - - // IsMTLSConnectionRequired is not supported by dedicated database - if adb.Spec.Details.NetworkAccess.IsMTLSConnectionRequired != nil { - allErrs = append(allErrs, - field.Forbidden(field.NewPath("spec").Child("details").Child("networkAccess").Child("isMTLSConnectionRequired"), - "isMTLSConnectionRequired is not supported on a dedicated database")) - } - } - - return allErrs -} - // ValidateDelete implements webhook.Validator so a webhook will be registered for the type func (r *AutonomousDatabase) ValidateDelete() (admission.Warnings, error) { autonomousdatabaselog.Info("validate delete", "name", r.Name) - - // TODO(user): fill in your validation logic upon object deletion. return nil, nil } // Returns true if AutonomousContainerDatabaseOCID has value. // We don't use Details.IsDedicated because the parameter might be null when it's a provision operation. 
func isDedicated(adb *AutonomousDatabase) bool { - return adb.Spec.Details.AutonomousContainerDatabase.K8sACD.Name != nil || - adb.Spec.Details.AutonomousContainerDatabase.OCIACD.OCID != nil + return adb.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name != nil || + adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id != nil } diff --git a/apis/database/v1alpha1/autonomousdatabase_webhook_test.go b/apis/database/v1alpha1/autonomousdatabase_webhook_test.go index ee26021f..8949f8f4 100644 --- a/apis/database/v1alpha1/autonomousdatabase_webhook_test.go +++ b/apis/database/v1alpha1/autonomousdatabase_webhook_test.go @@ -50,55 +50,6 @@ import ( ) var _ = Describe("test AutonomousDatabase webhook", func() { - Describe("Test AutonomousDatabase mutating webhook", func() { - var ( - resourceName = "testadb" - namespace = "default" - adbLookupKey = types.NamespacedName{Name: resourceName, Namespace: namespace} - - timeout = time.Second * 5 - - adb *AutonomousDatabase - ) - - BeforeEach(func() { - adb = &AutonomousDatabase{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "database.oracle.com/v1alpha1", - Kind: "AutonomousDatabase", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: namespace, - }, - Spec: AutonomousDatabaseSpec{ - Details: AutonomousDatabaseDetails{}, - }, - } - }) - - AfterEach(func() { - Expect(k8sClient.Delete(context.TODO(), adb)).To(Succeed()) - }) - - It("Should set the default network access type to PRIVATE, if it's a dedicated ADB", func() { - By("Creating an AutonomousDatabase with ACD_OCID") - adb.Spec.Details.AutonomousContainerDatabase.OCIACD.OCID = common.String("ocid1.autonomouscontainerdatabase.oc1.dummy-acd-ocid") - - Expect(k8sClient.Create(context.TODO(), adb)).To(Succeed()) - - By("Checking the AutonomousDatabase has a network access type PRIVATE") - Eventually(func() NetworkAccessTypeEnum { - err := k8sClient.Get(context.TODO(), adbLookupKey, adb) - if err != nil { - return "" - } - - return 
adb.Spec.Details.NetworkAccess.AccessType - }, timeout).Should(Equal(NetworkAccessTypePrivate)) - }) - }) - Describe("Test ValidateCreate of the AutonomousDatabase validating webhook", func() { var ( resourceName = "testadb" @@ -119,16 +70,18 @@ var _ = Describe("test AutonomousDatabase webhook", func() { }, Spec: AutonomousDatabaseSpec{ Details: AutonomousDatabaseDetails{ - CompartmentOCID: common.String("fake-compartment-ocid"), - DbName: common.String("fake-dbName"), - DisplayName: common.String("fake-displayName"), - CPUCoreCount: common.Int(1), - AdminPassword: PasswordSpec{ - K8sSecret: K8sSecretSpec{ - Name: common.String("fake-admin-password"), + AutonomousDatabaseBase: AutonomousDatabaseBase{ + CompartmentId: common.String("fake-compartment-ocid"), + DbName: common.String("fake-dbName"), + DisplayName: common.String("fake-displayName"), + CpuCoreCount: common.Int(1), + AdminPassword: PasswordSpec{ + K8sSecret: K8sSecretSpec{ + Name: common.String("fake-admin-password"), + }, }, + DataStorageSizeInTBs: common.Int(1), }, - DataStorageSizeInTBs: common.Int(1), }, }, } @@ -139,7 +92,7 @@ var _ = Describe("test AutonomousDatabase webhook", func() { var errMsg string = "cannot apply k8sSecret.name and ociSecret.ocid at the same time" adb.Spec.Details.AdminPassword.K8sSecret.Name = common.String("test-admin-password") - adb.Spec.Details.AdminPassword.OCISecret.OCID = common.String("fake.ocid1.vaultsecret.oc1...") + adb.Spec.Details.AdminPassword.OciSecret.Id = common.String("fake.ocid1.vaultsecret.oc1...") validateInvalidTest(adb, false, errMsg) }) @@ -147,54 +100,23 @@ var _ = Describe("test AutonomousDatabase webhook", func() { It("Should not apply values to wallet.password.k8sSecret and wallet.password.ociSecret at the same time", func() { var errMsg string = "cannot apply k8sSecret.name and ociSecret.ocid at the same time" - adb.Spec.Details.Wallet.Password.K8sSecret.Name = common.String("test-wallet-password") - 
adb.Spec.Details.Wallet.Password.OCISecret.OCID = common.String("fake.ocid1.vaultsecret.oc1...") + adb.Spec.Wallet.Password.K8sSecret.Name = common.String("test-wallet-password") + adb.Spec.Wallet.Password.OciSecret.Id = common.String("fake.ocid1.vaultsecret.oc1...") validateInvalidTest(adb, false, errMsg) }) - // Network validation - Context("Shared Autonomous Database", func() { - It("AccessControlList cannot be empty when the network access type is RESTRICTED", func() { - var errMsg string = "accessControlList cannot be empty when the network access type is " + string(NetworkAccessTypeRestricted) - - adb.Spec.Details.NetworkAccess.AccessType = NetworkAccessTypeRestricted - adb.Spec.Details.NetworkAccess.AccessControlList = nil - - validateInvalidTest(adb, false, errMsg) - }) - - It("SubnetOCID and nsgOCIDs cannot be empty when the network access type is PRIVATE", func() { - var errMsg1 string = "subnetOCID cannot be empty when the network access type is " + string(NetworkAccessTypePrivate) - var errMsg2 string = "nsgOCIDs cannot be empty when the network access type is " + string(NetworkAccessTypePrivate) - - adb.Spec.Details.NetworkAccess.AccessType = NetworkAccessTypePrivate - adb.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID = nil - adb.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs = nil - - validateInvalidTest(adb, false, errMsg1, errMsg2) - }) - - It("IsAccessControlEnabled is not applicable on a shared Autonomous Database", func() { - var errMsg string = "isAccessControlEnabled is not applicable on a shared Autonomous Database" - - adb.Spec.Details.NetworkAccess.IsAccessControlEnabled = common.Bool(true) - - validateInvalidTest(adb, false, errMsg) - }) - }) - Context("Dedicated Autonomous Database", func() { BeforeEach(func() { - adb.Spec.Details.AutonomousContainerDatabase.K8sACD.Name = common.String("testACD") - adb.Spec.Details.AutonomousContainerDatabase.OCIACD.OCID = common.String("fake-acd-ocid") + 
adb.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name = common.String("testACD") + adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id = common.String("fake-acd-ocid") }) It("AccessControlList cannot be empty when the network access type is RESTRICTED", func() { var errMsg string = "access control list cannot be provided when Autonomous Database's access control is disabled" - adb.Spec.Details.NetworkAccess.IsAccessControlEnabled = common.Bool(false) - adb.Spec.Details.NetworkAccess.AccessControlList = []string{"192.168.1.1"} + adb.Spec.Details.IsAccessControlEnabled = common.Bool(false) + adb.Spec.Details.WhitelistedIps = []string{"192.168.1.1"} validateInvalidTest(adb, false, errMsg) }) @@ -202,21 +124,12 @@ var _ = Describe("test AutonomousDatabase webhook", func() { It("AccessControlList cannot be empty when the network access type is RESTRICTED", func() { var errMsg string = "isMTLSConnectionRequired is not supported on a dedicated database" - adb.Spec.Details.NetworkAccess.IsMTLSConnectionRequired = common.Bool(true) + adb.Spec.Details.IsMtlsConnectionRequired = common.Bool(true) validateInvalidTest(adb, false, errMsg) }) }) - - // Others - It("Cannot apply lifecycleState to a provision operation", func() { - var errMsg string = "cannot apply lifecycleState to a provision operation" - - adb.Spec.Details.LifecycleState = database.AutonomousDatabaseLifecycleStateStopped - - validateInvalidTest(adb, false, errMsg) - }) }) // Skip the common and network validations since they're already verified in the test for ValidateCreate @@ -242,14 +155,16 @@ var _ = Describe("test AutonomousDatabase webhook", func() { Namespace: namespace, }, Spec: AutonomousDatabaseSpec{ + Action: "Create", Details: AutonomousDatabaseDetails{ - CompartmentOCID: common.String("fake-compartment-ocid"), - AutonomousDatabaseOCID: common.String("fake-adb-ocid"), - DbName: common.String("fake-dbName"), - DisplayName: common.String("fake-displayName"), - CPUCoreCount: common.Int(1), - 
DataStorageSizeInTBs: common.Int(1), - LifecycleState: database.AutonomousDatabaseLifecycleStateAvailable, + Id: common.String("fake-adb-ocid"), + AutonomousDatabaseBase: AutonomousDatabaseBase{ + CompartmentId: common.String("fake-compartment-ocid"), + DbName: common.String("fake-dbName"), + DisplayName: common.String("fake-displayName"), + CpuCoreCount: common.Int(1), + DataStorageSizeInTBs: common.Int(1), + }, }, }, } @@ -293,16 +208,7 @@ var _ = Describe("test AutonomousDatabase webhook", func() { It("AutonomousDatabaseOCID cannot be modified", func() { var errMsg string = "autonomousDatabaseOCID cannot be modified" - adb.Spec.Details.AutonomousDatabaseOCID = common.String("modified-adb-ocid") - - validateInvalidTest(adb, true, errMsg) - }) - - It("Cannot change lifecycleState with other spec attributes at the same time", func() { - var errMsg string = "cannot change lifecycleState with other spec attributes at the same time" - - adb.Spec.Details.LifecycleState = database.AutonomousDatabaseLifecycleStateStopped - adb.Spec.Details.CPUCoreCount = common.Int(2) + adb.Spec.Details.Id = common.String("modified-adb-ocid") validateInvalidTest(adb, true, errMsg) }) diff --git a/apis/database/v1alpha1/autonomousdatabasebackup_types.go b/apis/database/v1alpha1/autonomousdatabasebackup_types.go index 95c77560..aa70c2d5 100644 --- a/apis/database/v1alpha1/autonomousdatabasebackup_types.go +++ b/apis/database/v1alpha1/autonomousdatabasebackup_types.go @@ -43,6 +43,7 @@ import ( "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/database" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
@@ -57,7 +58,7 @@ type AutonomousDatabaseBackupSpec struct { AutonomousDatabaseBackupOCID *string `json:"autonomousDatabaseBackupOCID,omitempty"` IsLongTermBackup *bool `json:"isLongTermBackup,omitempty"` RetentionPeriodInDays *int `json:"retentionPeriodInDays,omitempty"` - OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` } // AutonomousDatabaseBackupStatus defines the observed state of AutonomousDatabaseBackup @@ -104,7 +105,7 @@ func init() { SchemeBuilder.Register(&AutonomousDatabaseBackup{}, &AutonomousDatabaseBackupList{}) } -func (b *AutonomousDatabaseBackup) UpdateStatusFromOCIBackup(ociBackup database.AutonomousDatabaseBackup, ociADB database.AutonomousDatabase) { +func (b *AutonomousDatabaseBackup) UpdateStatusFromOCIBackup(ociBackup database.AutonomousDatabaseBackup, ociAdb database.AutonomousDatabase) { b.Status.AutonomousDatabaseOCID = *ociBackup.AutonomousDatabaseId b.Status.CompartmentOCID = *ociBackup.CompartmentId b.Status.Type = ociBackup.Type @@ -112,14 +113,14 @@ func (b *AutonomousDatabaseBackup) UpdateStatusFromOCIBackup(ociBackup database. 
b.Status.LifecycleState = ociBackup.LifecycleState - b.Status.TimeStarted = FormatSDKTime(ociBackup.TimeStarted) - b.Status.TimeEnded = FormatSDKTime(ociBackup.TimeEnded) + b.Status.TimeStarted = dbv4.FormatSDKTime(ociBackup.TimeStarted) + b.Status.TimeEnded = dbv4.FormatSDKTime(ociBackup.TimeEnded) - b.Status.DBDisplayName = *ociADB.DisplayName - b.Status.DBName = *ociADB.DbName + b.Status.DBDisplayName = *ociAdb.DisplayName + b.Status.DBName = *ociAdb.DbName } // GetTimeEnded returns the status.timeEnded in SDKTime format func (b *AutonomousDatabaseBackup) GetTimeEnded() (*common.SDKTime, error) { - return parseDisplayTime(b.Status.TimeEnded) + return dbv4.ParseDisplayTime(b.Status.TimeEnded) } diff --git a/apis/database/v1alpha1/autonomousdatabasebackup_webhook.go b/apis/database/v1alpha1/autonomousdatabasebackup_webhook.go index 99bf3815..ffa9b888 100644 --- a/apis/database/v1alpha1/autonomousdatabasebackup_webhook.go +++ b/apis/database/v1alpha1/autonomousdatabasebackup_webhook.go @@ -59,21 +59,16 @@ func (r *AutonomousDatabaseBackup) SetupWebhookWithManager(mgr ctrl.Manager) err Complete() } -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
- -//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-autonomousdatabasebackup,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,verbs=create;update,versions=v1alpha1,name=mautonomousdatabasebackup.kb.io,admissionReviewVersions={v1} +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-autonomousdatabasebackup,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,verbs=create;update,versions=v1alpha1,name=mautonomousdatabasebackupv1alpha1.kb.io,admissionReviewVersions=v1 var _ webhook.Defaulter = &AutonomousDatabaseBackup{} // Default implements webhook.Defaulter so a webhook will be registered for the type func (r *AutonomousDatabaseBackup) Default() { autonomousdatabasebackuplog.Info("default", "name", r.Name) - - // TODO(user): fill in your defaulting logic. } -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
-//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomousdatabasebackup,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,versions=v1alpha1,name=vautonomousdatabasebackup.kb.io,admissionReviewVersions={v1} +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomousdatabasebackup,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,versions=v1alpha1,name=vautonomousdatabasebackupv1alpha1.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &AutonomousDatabaseBackup{} @@ -96,12 +91,12 @@ func (r *AutonomousDatabaseBackup) ValidateCreate() (admission.Warnings, error) } } - if r.Spec.Target.K8sADB.Name == nil && r.Spec.Target.OCIADB.OCID == nil { + if r.Spec.Target.K8sAdb.Name == nil && r.Spec.Target.OciAdb.Ocid == nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("target"), "target ADB is empty")) } - if r.Spec.Target.K8sADB.Name != nil && r.Spec.Target.OCIADB.OCID != nil { + if r.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.OciAdb.Ocid != nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("target"), "specify either k8sADB or ociADB, but not both")) } @@ -128,14 +123,14 @@ func (r *AutonomousDatabaseBackup) ValidateUpdate(old runtime.Object) (admission "cannot assign a new autonomousDatabaseBackupOCID to this backup")) } - if oldBackup.Spec.Target.K8sADB.Name != nil && r.Spec.Target.K8sADB.Name != nil && - *oldBackup.Spec.Target.K8sADB.Name != *r.Spec.Target.K8sADB.Name { + if oldBackup.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.K8sAdb.Name != nil && + *oldBackup.Spec.Target.K8sAdb.Name != *r.Spec.Target.K8sAdb.Name { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("target").Child("k8sADB").Child("name"), "cannot assign a new name to the target")) } - if 
oldBackup.Spec.Target.OCIADB.OCID != nil && r.Spec.Target.OCIADB.OCID != nil && - *oldBackup.Spec.Target.OCIADB.OCID != *r.Spec.Target.OCIADB.OCID { + if oldBackup.Spec.Target.OciAdb.Ocid != nil && r.Spec.Target.OciAdb.Ocid != nil && + *oldBackup.Spec.Target.OciAdb.Ocid != *r.Spec.Target.OciAdb.Ocid { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("target").Child("ociADB").Child("ocid"), "cannot assign a new ocid to the target")) } diff --git a/apis/database/v1alpha1/autonomousdatabasebackup_webhook_test.go b/apis/database/v1alpha1/autonomousdatabasebackup_webhook_test.go index 497c3f28..87eb1618 100644 --- a/apis/database/v1alpha1/autonomousdatabasebackup_webhook_test.go +++ b/apis/database/v1alpha1/autonomousdatabasebackup_webhook_test.go @@ -75,8 +75,8 @@ var _ = Describe("test AutonomousDatabaseBackup webhook", func() { It("Should specify at least one of the k8sADB and ociADB", func() { var errMsg string = "target ADB is empty" - backup.Spec.Target.K8sADB.Name = nil - backup.Spec.Target.OCIADB.OCID = nil + backup.Spec.Target.K8sAdb.Name = nil + backup.Spec.Target.OciAdb.Ocid = nil validateInvalidTest(backup, false, errMsg) }) @@ -84,8 +84,8 @@ var _ = Describe("test AutonomousDatabaseBackup webhook", func() { It("Should specify either k8sADB or ociADB, but not both", func() { var errMsg string = "specify either k8sADB or ociADB, but not both" - backup.Spec.Target.K8sADB.Name = common.String("fake-target-adb") - backup.Spec.Target.OCIADB.OCID = common.String("fake.ocid1.autonomousdatabase.oc1...") + backup.Spec.Target.K8sAdb.Name = common.String("fake-target-adb") + backup.Spec.Target.OciAdb.Ocid = common.String("fake.ocid1.autonomousdatabase.oc1...") validateInvalidTest(backup, false, errMsg) }) @@ -133,15 +133,15 @@ var _ = Describe("test AutonomousDatabaseBackup webhook", func() { Expect(k8sClient.Delete(context.TODO(), backup)).To(Succeed()) }) - Context("The bakcup is using target.k8sADB.name", func() { + Context("The bakcup is using 
target.k8sAdb.name", func() { BeforeEach(func() { - backup.Spec.Target.K8sADB.Name = common.String("fake-target-adb") + backup.Spec.Target.K8sAdb.Name = common.String("fake-target-adb") }) It("Cannot assign a new name to the target", func() { var errMsg string = "cannot assign a new name to the target" - backup.Spec.Target.K8sADB.Name = common.String("modified-target-adb") + backup.Spec.Target.K8sAdb.Name = common.String("modified-target-adb") validateInvalidTest(backup, true, errMsg) }) @@ -157,13 +157,13 @@ var _ = Describe("test AutonomousDatabaseBackup webhook", func() { Context("The bakcup is using target.ociADB.ocid", func() { BeforeEach(func() { - backup.Spec.Target.OCIADB.OCID = common.String("fake.ocid1.autonomousdatabase.oc1...") + backup.Spec.Target.OciAdb.Ocid = common.String("fake.ocid1.autonomousdatabase.oc1...") }) It("Cannot assign a new ocid to the target", func() { var errMsg string = "cannot assign a new ocid to the target" - backup.Spec.Target.OCIADB.OCID = common.String("modified.ocid1.autonomousdatabase.oc1...") + backup.Spec.Target.OciAdb.Ocid = common.String("modified.ocid1.autonomousdatabase.oc1...") validateInvalidTest(backup, true, errMsg) }) diff --git a/apis/database/v1alpha1/autonomousdatabaserestore_types.go b/apis/database/v1alpha1/autonomousdatabaserestore_types.go index 4bea0043..ef8698b2 100644 --- a/apis/database/v1alpha1/autonomousdatabaserestore_types.go +++ b/apis/database/v1alpha1/autonomousdatabaserestore_types.go @@ -46,22 +46,23 @@ import ( "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/database" "github.com/oracle/oci-go-sdk/v65/workrequests" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
-type K8sADBBackupSpec struct { +type K8sAdbBackupSpec struct { Name *string `json:"name,omitempty"` } -type PITSpec struct { +type PitSpec struct { // The timestamp must follow this format: YYYY-MM-DD HH:MM:SS GMT Timestamp *string `json:"timestamp,omitempty"` } type SourceSpec struct { - K8sADBBackup K8sADBBackupSpec `json:"k8sADBBackup,omitempty"` - PointInTime PITSpec `json:"pointInTime,omitempty"` + K8sAdbBackup K8sAdbBackupSpec `json:"k8sADBBackup,omitempty"` + PointInTime PitSpec `json:"pointInTime,omitempty"` } // AutonomousDatabaseRestoreSpec defines the desired state of AutonomousDatabaseRestore @@ -70,7 +71,7 @@ type AutonomousDatabaseRestoreSpec struct { // Important: Run "make" to regenerate code after modifying this file Target TargetSpec `json:"target"` Source SourceSpec `json:"source"` - OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` } // AutonomousDatabaseRestoreStatus defines the observed state of AutonomousDatabaseRestore @@ -120,7 +121,7 @@ func (r *AutonomousDatabaseRestore) GetPIT() (*common.SDKTime, error) { if r.Spec.Source.PointInTime.Timestamp == nil { return nil, errors.New("the timestamp is empty") } - return parseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) + return dbv4.ParseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) } func (r *AutonomousDatabaseRestore) UpdateStatus( @@ -132,7 +133,7 @@ func (r *AutonomousDatabaseRestore) UpdateStatus( r.Status.WorkRequestOCID = *workResp.Id r.Status.Status = workResp.Status - r.Status.TimeAccepted = FormatSDKTime(workResp.TimeAccepted) - r.Status.TimeStarted = FormatSDKTime(workResp.TimeStarted) - r.Status.TimeEnded = FormatSDKTime(workResp.TimeFinished) + r.Status.TimeAccepted = dbv4.FormatSDKTime(workResp.TimeAccepted) + r.Status.TimeStarted = dbv4.FormatSDKTime(workResp.TimeStarted) + r.Status.TimeEnded = dbv4.FormatSDKTime(workResp.TimeFinished) } diff --git a/apis/database/v1alpha1/autonomousdatabaserestore_webhook.go 
b/apis/database/v1alpha1/autonomousdatabaserestore_webhook.go index 4f96bd7b..dcd57137 100644 --- a/apis/database/v1alpha1/autonomousdatabaserestore_webhook.go +++ b/apis/database/v1alpha1/autonomousdatabaserestore_webhook.go @@ -39,6 +39,7 @@ package v1alpha1 import ( + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" dbcommons "github.com/oracle/oracle-database-operator/commons/database" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -59,10 +60,7 @@ func (r *AutonomousDatabaseRestore) SetupWebhookWithManager(mgr ctrl.Manager) er Complete() } -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! - -// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomousdatabaserestore,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabaserestores,versions=v1alpha1,name=vautonomousdatabaserestore.kb.io,admissionReviewVersions={v1} +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v1alpha1-autonomousdatabaserestore,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabaserestores,versions=v1alpha1,name=vautonomousdatabaserestorev1alpha1.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &AutonomousDatabaseRestore{} @@ -86,24 +84,24 @@ func (r *AutonomousDatabaseRestore) ValidateCreate() (admission.Warnings, error) } // Validate the target ADB - if r.Spec.Target.K8sADB.Name == nil && r.Spec.Target.OCIADB.OCID == nil { + if r.Spec.Target.K8sAdb.Name == nil && r.Spec.Target.OciAdb.Ocid == nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("target"), "target ADB is empty")) } - if r.Spec.Target.K8sADB.Name != nil && r.Spec.Target.OCIADB.OCID != nil { + if r.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.OciAdb.Ocid != nil { 
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("target"), "specify either k8sADB.name or ociADB.ocid, but not both")) } // Validate the restore source - if r.Spec.Source.K8sADBBackup.Name == nil && + if r.Spec.Source.K8sAdbBackup.Name == nil && r.Spec.Source.PointInTime.Timestamp == nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("source"), "retore source is empty")) } - if r.Spec.Source.K8sADBBackup.Name != nil && + if r.Spec.Source.K8sAdbBackup.Name != nil && r.Spec.Source.PointInTime.Timestamp != nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("source"), "cannot apply backupName and the PITR parameters at the same time")) @@ -111,7 +109,7 @@ func (r *AutonomousDatabaseRestore) ValidateCreate() (admission.Warnings, error) // Verify the timestamp format if it's PITR if r.Spec.Source.PointInTime.Timestamp != nil { - _, err := parseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) + _, err := dbv4.ParseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) if err != nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("source").Child("pointInTime").Child("timestamp"), "invalid timestamp format")) diff --git a/apis/database/v1alpha1/autonomousdatabaserestore_webhook_test.go b/apis/database/v1alpha1/autonomousdatabaserestore_webhook_test.go index aed87d71..0cc9b692 100644 --- a/apis/database/v1alpha1/autonomousdatabaserestore_webhook_test.go +++ b/apis/database/v1alpha1/autonomousdatabaserestore_webhook_test.go @@ -71,8 +71,8 @@ var _ = Describe("test AutonomousDatabaseRestore webhook", func() { It("Should specify at least one of the k8sADB and ociADB", func() { var errMsg string = "target ADB is empty" - restore.Spec.Target.K8sADB.Name = nil - restore.Spec.Target.OCIADB.OCID = nil + restore.Spec.Target.K8sAdb.Name = nil + restore.Spec.Target.OciAdb.Ocid = nil validateInvalidTest(restore, false, errMsg) }) @@ -80,8 +80,8 @@ var _ = Describe("test AutonomousDatabaseRestore 
webhook", func() { It("Should specify either k8sADB.name or ociADB.ocid, but not both", func() { var errMsg string = "specify either k8sADB.name or ociADB.ocid, but not both" - restore.Spec.Target.K8sADB.Name = common.String("fake-target-adb") - restore.Spec.Target.OCIADB.OCID = common.String("fake.ocid1.autonomousdatabase.oc1...") + restore.Spec.Target.K8sAdb.Name = common.String("fake-target-adb") + restore.Spec.Target.OciAdb.Ocid = common.String("fake.ocid1.autonomousdatabase.oc1...") validateInvalidTest(restore, false, errMsg) }) @@ -89,7 +89,7 @@ var _ = Describe("test AutonomousDatabaseRestore webhook", func() { It("Should select at least one restore source", func() { var errMsg string = "retore source is empty" - restore.Spec.Source.K8sADBBackup.Name = nil + restore.Spec.Source.K8sAdbBackup.Name = nil restore.Spec.Source.PointInTime.Timestamp = nil validateInvalidTest(restore, false, errMsg) @@ -98,7 +98,7 @@ var _ = Describe("test AutonomousDatabaseRestore webhook", func() { It("Cannot apply backupName and the PITR parameters at the same time", func() { var errMsg string = "cannot apply backupName and the PITR parameters at the same time" - restore.Spec.Source.K8sADBBackup.Name = common.String("fake-source-adb-backup") + restore.Spec.Source.K8sAdbBackup.Name = common.String("fake-source-adb-backup") restore.Spec.Source.PointInTime.Timestamp = common.String("2021-12-23 11:03:13 UTC") validateInvalidTest(restore, false, errMsg) diff --git a/apis/database/v1alpha1/dataguardbroker_conversion.go b/apis/database/v1alpha1/dataguardbroker_conversion.go new file mode 100644 index 00000000..39751a05 --- /dev/null +++ b/apis/database/v1alpha1/dataguardbroker_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *DataguardBroker) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *DataguardBroker) ConvertFrom(src conversion.Hub) error { + 
return nil +} diff --git a/apis/database/v1alpha1/dataguardbroker_types.go b/apis/database/v1alpha1/dataguardbroker_types.go index 37d71b92..768d6dd3 100644 --- a/apis/database/v1alpha1/dataguardbroker_types.go +++ b/apis/database/v1alpha1/dataguardbroker_types.go @@ -56,20 +56,10 @@ type DataguardBrokerSpec struct { LoadBalancer bool `json:"loadBalancer,omitempty"` ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` // +kubebuilder:validation:Enum=MaxPerformance;MaxAvailability - ProtectionMode string `json:"protectionMode"` - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - FastStartFailOver DataguardBrokerFastStartFailOver `json:"fastStartFailOver,omitempty"` -} - -type DataguardBrokerFastStartFailOver struct { - Enable bool `json:"enable,omitempty"` - Strategy []DataguardBrokerStrategy `json:"strategy,omitempty"` -} + ProtectionMode string `json:"protectionMode"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` -// FSFO strategy -type DataguardBrokerStrategy struct { - SourceDatabaseRef string `json:"sourceDatabaseRef,omitempty"` - TargetDatabaseRefs string `json:"targetDatabaseRefs,omitempty"` + FastStartFailover bool `json:"fastStartFailover,omitempty"` } // DataguardBrokerStatus defines the observed state of DataguardBroker @@ -84,10 +74,13 @@ type DataguardBrokerStatus struct { ExternalConnectString string `json:"externalConnectString,omitempty"` ClusterConnectString string `json:"clusterConnectString,omitempty"` Status string `json:"status,omitempty"` + + FastStartFailover string `json:"fastStartFailover,omitempty"` + DatabasesInDataguardConfig map[string]string `json:"databasesInDataguardConfig,omitempty"` } -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status // +kubebuilder:printcolumn:JSONPath=".status.primaryDatabase",name="Primary",type="string" // 
+kubebuilder:printcolumn:JSONPath=".status.standbyDatabases",name="Standbys",type="string" // +kubebuilder:printcolumn:JSONPath=".spec.protectionMode",name="Protection Mode",type="string" @@ -95,6 +88,7 @@ type DataguardBrokerStatus struct { // +kubebuilder:printcolumn:JSONPath=".status.externalConnectString",name="Connect Str",type="string" // +kubebuilder:printcolumn:JSONPath=".spec.primaryDatabaseRef",name="Primary Database",type="string", priority=1 // +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.fastStartFailover",name="FSFO", type="string" // DataguardBroker is the Schema for the dataguardbrokers API type DataguardBroker struct { @@ -105,6 +99,55 @@ type DataguardBroker struct { Status DataguardBrokerStatus `json:"status,omitempty"` } +// ////////////////////////////////////////////////////////////////////////////////////////////////// +// Returns the current primary database in the dataguard configuration from the resource status/spec +// ////////////////////////////////////////////////////////////////////////////////////////////////// +func (broker *DataguardBroker) GetCurrentPrimaryDatabase() string { + if broker.Status.PrimaryDatabase != "" { + return broker.Status.DatabasesInDataguardConfig[broker.Status.PrimaryDatabase] + } + return broker.Spec.PrimaryDatabaseRef +} + +// ////////////////////////////////////////////////////////////////////////////////////////////////// +// Returns databases in Dataguard configuration from the resource status/spec +// ////////////////////////////////////////////////////////////////////////////////////////////////// +func (broker *DataguardBroker) GetDatabasesInDataGuardConfiguration() []string { + var databases []string + if len(broker.Status.DatabasesInDataguardConfig) > 0 { + for _, value := range broker.Status.DatabasesInDataguardConfig { + if value != "" { + databases = append(databases, value) + } + } + + return databases + } + + 
databases = append(databases, broker.Spec.PrimaryDatabaseRef) + databases = append(databases, broker.Spec.StandbyDatabaseRefs...) + return databases +} + +// ////////////////////////////////////////////////////////////////////////////////////////////////// +// Returns standby databases in the dataguard configuration from the resource status/spec +// ////////////////////////////////////////////////////////////////////////////////////////////////// +func (broker *DataguardBroker) GetStandbyDatabasesInDgConfig() []string { + var databases []string + if len(broker.Status.DatabasesInDataguardConfig) > 0 { + for _, value := range broker.Status.DatabasesInDataguardConfig { + if value != "" && value != broker.Status.PrimaryDatabase { + databases = append(databases, value) + } + } + + return databases + } + + databases = append(databases, broker.Spec.StandbyDatabaseRefs...) + return databases +} + //+kubebuilder:object:root=true // DataguardBrokerList contains a list of DataguardBroker diff --git a/apis/database/v1alpha1/dataguardbroker_webhook.go b/apis/database/v1alpha1/dataguardbroker_webhook.go index a9d59286..89a9d3fd 100644 --- a/apis/database/v1alpha1/dataguardbroker_webhook.go +++ b/apis/database/v1alpha1/dataguardbroker_webhook.go @@ -39,6 +39,7 @@ package v1alpha1 import ( + "strconv" "strings" dbcommons "github.com/oracle/oracle-database-operator/commons/database" @@ -89,6 +90,10 @@ func (r *DataguardBroker) Default() { r.Spec.ServiceAnnotations["service.beta.kubernetes.io/oci-load-balancer-shape-flex-max"] = "100" } } + + if r.Spec.SetAsPrimaryDatabase != "" { + r.Spec.SetAsPrimaryDatabase = strings.ToUpper(r.Spec.SetAsPrimaryDatabase) + } } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
@@ -154,6 +159,11 @@ func (r *DataguardBroker) ValidateUpdate(old runtime.Object) (admission.Warnings allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("primaryDatabaseRef"), "cannot be changed")) } + fastStartFailoverStatus, _ := strconv.ParseBool(oldObj.Status.FastStartFailover) + if (fastStartFailoverStatus || r.Spec.FastStartFailover) && r.Spec.SetAsPrimaryDatabase != "" { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("setAsPrimaryDatabase"), "switchover not supported when fastStartFailover is true")) + } if len(allErrs) == 0 { return nil, nil diff --git a/apis/database/v1alpha1/dbcssystem_conversion.go b/apis/database/v1alpha1/dbcssystem_conversion.go new file mode 100644 index 00000000..0aa6a258 --- /dev/null +++ b/apis/database/v1alpha1/dbcssystem_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *DbcsSystem) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *DbcsSystem) ConvertFrom(src conversion.Hub) error { + return nil +} diff --git a/apis/database/v1alpha1/dbcssystem_kms_types.go b/apis/database/v1alpha1/dbcssystem_kms_types.go new file mode 100644 index 00000000..c90726e3 --- /dev/null +++ b/apis/database/v1alpha1/dbcssystem_kms_types.go @@ -0,0 +1,141 @@ +/* +** Copyright (c) 2022-2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ +package v1alpha1 + +import "encoding/json" + +type KMSConfig struct { + VaultName string `json:"vaultName,omitempty"` + CompartmentId string `json:"compartmentId,omitempty"` + KeyName string `json:"keyName,omitempty"` + EncryptionAlgo string `json:"encryptionAlgo,omitempty"` + VaultType string `json:"vaultType,omitempty"` +} +type KMSDetailsStatus struct { + VaultId string `json:"vaultId,omitempty"` + ManagementEndpoint string `json:"managementEndpoint,omitempty"` + KeyId string `json:"keyId,omitempty"` + VaultName string `json:"vaultName,omitempty"` + CompartmentId string `json:"compartmentId,omitempty"` + KeyName string `json:"keyName,omitempty"` + EncryptionAlgo string `json:"encryptionAlgo,omitempty"` + VaultType string `json:"vaultType,omitempty"` +} + +const ( + lastSuccessfulKMSConfig = "lastSuccessfulKMSConfig" + lastSuccessfulKMSStatus = "lastSuccessfulKMSStatus" +) + +// GetLastSuccessfulKMSConfig returns the KMS config from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulKMSConfig. +func (dbcs *DbcsSystem) GetLastSuccessfulKMSConfig() (*KMSConfig, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulKMSConfig] + if !ok { + return nil, nil + } + + configBytes := []byte(val) + kmsConfig := KMSConfig{} + + err := json.Unmarshal(configBytes, &kmsConfig) + if err != nil { + return nil, err + } + + return &kmsConfig, nil +} + +// GetLastSuccessfulKMSStatus returns the KMS status from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulKMSStatus. +func (dbcs *DbcsSystem) GetLastSuccessfulKMSStatus() (*KMSDetailsStatus, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulKMSStatus] + if !ok { + return nil, nil + } + + statusBytes := []byte(val) + kmsStatus := KMSDetailsStatus{} + + err := json.Unmarshal(statusBytes, &kmsStatus) + if err != nil { + return nil, err + } + + return &kmsStatus, nil +} + +// SetLastSuccessfulKMSConfig saves the given KMSConfig to the annotations. 
+func (dbcs *DbcsSystem) SetLastSuccessfulKMSConfig(kmsConfig *KMSConfig) error { + configBytes, err := json.Marshal(kmsConfig) + if err != nil { + return err + } + + annotations := dbcs.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[lastSuccessfulKMSConfig] = string(configBytes) + dbcs.SetAnnotations(annotations) + return nil +} + +// SetLastSuccessfulKMSStatus saves the given KMSDetailsStatus to the annotations. +func (dbcs *DbcsSystem) SetLastSuccessfulKMSStatus(kmsStatus *KMSDetailsStatus) error { + statusBytes, err := json.Marshal(kmsStatus) + if err != nil { + return err + } + + annotations := dbcs.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[lastSuccessfulKMSStatus] = string(statusBytes) + dbcs.SetAnnotations(annotations) + // Update KMSDetailsStatus in DbcsSystemStatus + dbcs.Status.KMSDetailsStatus = KMSDetailsStatus{ + VaultName: kmsStatus.VaultName, + CompartmentId: kmsStatus.CompartmentId, + KeyName: kmsStatus.KeyName, + EncryptionAlgo: kmsStatus.EncryptionAlgo, + VaultType: kmsStatus.VaultType, + } + return nil +} diff --git a/apis/database/v1alpha1/dbcssystem_pdbconfig_types.go b/apis/database/v1alpha1/dbcssystem_pdbconfig_types.go new file mode 100644 index 00000000..1b745e09 --- /dev/null +++ b/apis/database/v1alpha1/dbcssystem_pdbconfig_types.go @@ -0,0 +1,83 @@ +/* +** Copyright (c) 2022-2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ +package v1alpha1 + +// PDBConfig defines details of PDB struct for DBCS systems +type PDBConfig struct { + // The name for the pluggable database (PDB). The name is unique in the context of a Database. The name must begin with an alphabetic character and can contain a maximum of thirty alphanumeric characters. Special characters are not permitted. The pluggable database name should not be same as the container database name. + PdbName *string `mandatory:"true" json:"pdbName"` + + // The OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the CDB + // ContainerDatabaseId *string `mandatory:"false" json:"containerDatabaseId"` + + // // A strong password for PDB Admin. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numbers, and two special characters. The special characters must be _, \#, or -. + PdbAdminPassword *string `mandatory:"false" json:"pdbAdminPassword"` + + // // The existing TDE wallet password of the CDB. + TdeWalletPassword *string `mandatory:"false" json:"tdeWalletPassword"` + + // // The locked mode of the pluggable database admin account. If false, the user needs to provide the PDB Admin Password to connect to it. + // // If true, the pluggable database will be locked and user cannot login to it. + ShouldPdbAdminAccountBeLocked *bool `mandatory:"false" json:"shouldPdbAdminAccountBeLocked"` + + // // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. + // // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). + // // Example: `{"Department": "Finance"}` + FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` + + // // Defined tags for this resource. Each key is predefined and scoped to a namespace. + // // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). 
+ // DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"` + + // To specify whether to delete the PDB + IsDelete *bool `mandatory:"false" json:"isDelete,omitempty"` + + // The OCID of the PDB for deletion purposes. + PluggableDatabaseId *string `mandatory:"false" json:"pluggableDatabaseId,omitempty"` +} + +type PDBConfigStatus struct { + PdbName *string `mandatory:"true" json:"pdbName"` + ShouldPdbAdminAccountBeLocked *bool `mandatory:"false" json:"shouldPdbAdminAccountBeLocked"` + FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` + PluggableDatabaseId *string `mandatory:"false" json:"pluggableDatabaseId,omitempty"` + PdbLifecycleState LifecycleState `json:"pdbState,omitempty"` +} +type PDBDetailsStatus struct { + PDBConfigStatus []PDBConfigStatus `json:"pdbConfigStatus,omitempty"` +} diff --git a/apis/database/v1alpha1/dbcssystem_types.go b/apis/database/v1alpha1/dbcssystem_types.go index 37e80a6b..d49fde8c 100644 --- a/apis/database/v1alpha1/dbcssystem_types.go +++ b/apis/database/v1alpha1/dbcssystem_types.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2022 Oracle and/or its affiliates. +** Copyright (c) 2022-2024 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -42,6 +42,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/go-logr/logr" dbcsv1 "github.com/oracle/oracle-database-operator/commons/annotations" "sigs.k8s.io/controller-runtime/pkg/client" @@ -52,11 +53,17 @@ import ( // DbcsSystemSpec defines the desired state of DbcsSystem type DbcsSystemSpec struct { - DbSystem DbSystemDetails `json:"dbSystem,omitempty"` - Id *string `json:"id,omitempty"` - OCIConfigMap string `json:"ociConfigMap"` - OCISecret string `json:"ociSecret,omitempty"` - HardLink bool `json:"hardLink,omitempty"` + DbSystem DbSystemDetails `json:"dbSystem,omitempty"` + Id *string `json:"id,omitempty"` + OCIConfigMap *string `json:"ociConfigMap"` + OCISecret *string `json:"ociSecret,omitempty"` + DbClone *DbCloneConfig `json:"dbClone,omitempty"` + HardLink bool `json:"hardLink,omitempty"` + PdbConfigs []PDBConfig `json:"pdbConfigs,omitempty"` + SetupDBCloning bool `json:"setupDBCloning,omitempty"` + DbBackupId *string `json:"dbBackupId,omitempty"` + DatabaseId *string `json:"databaseId,omitempty"` + KMSConfig KMSConfig `json:"kmsConfig,omitempty"` } // DbSystemDetails Spec @@ -66,7 +73,7 @@ type DbSystemDetails struct { AvailabilityDomain string `json:"availabilityDomain"` SubnetId string `json:"subnetId"` Shape string `json:"shape"` - SshPublicKeys []string `json:"sshPublicKeys"` + SshPublicKeys []string `json:"sshPublicKeys,omitempty"` HostName string `json:"hostName"` CpuCoreCount int `json:"cpuCoreCount,omitempty"` FaultDomains []string `json:"faultDomains,omitempty"` @@ -78,8 +85,6 @@ type DbSystemDetails struct { Domain string `json:"domain,omitempty"` InitialDataStorageSizeInGB int `json:"initialDataStorageSizeInGB,omitempty"` ClusterName string `json:"clusterName,omitempty"` - KmsKeyId string `json:"kmsKeyId,omitempty"` - KmsKeyVersionId string `json:"kmsKeyVersionId,omitempty"` DbAdminPaswordSecret string `json:"dbAdminPaswordSecret"` DbName string 
`json:"dbName,omitempty"` PdbName string `json:"pdbName,omitempty"` @@ -94,9 +99,10 @@ type DbSystemDetails struct { TdeWalletPasswordSecret string `json:"tdeWalletPasswordSecret,omitempty"` Tags map[string]string `json:"tags,omitempty"` DbBackupConfig Backupconfig `json:"dbBackupConfig,omitempty"` + KMSConfig KMSConfig `json:"kmsConfig,omitempty"` } -// DB Backup COnfig Network Struct +// DB Backup Config Network Struct type Backupconfig struct { AutoBackupEnabled *bool `json:"autoBackupEnabled,omitempty"` RecoveryWindowsInDays *int `json:"recoveryWindowsInDays,omitempty"` @@ -121,11 +127,14 @@ type DbcsSystemStatus struct { DataStorageSizeInGBs *int `json:"dataStorageSizeInGBs,omitempty"` RecoStorageSizeInGB *int `json:"recoStorageSizeInGB,omitempty"` - Shape *string `json:"shape,omitempty"` - State LifecycleState `json:"state"` - DbInfo []DbStatus `json:"dbInfo,omitempty"` - Network VmNetworkDetails `json:"network,omitempty"` - WorkRequests []DbWorkrequests `json:"workRequests,omitempty"` + Shape *string `json:"shape,omitempty"` + State LifecycleState `json:"state"` + DbInfo []DbStatus `json:"dbInfo,omitempty"` + Network VmNetworkDetails `json:"network,omitempty"` + WorkRequests []DbWorkrequests `json:"workRequests,omitempty"` + KMSDetailsStatus KMSDetailsStatus `json:"kmsDetailsStatus,omitempty"` + DbCloneStatus DbCloneStatus `json:"dbCloneStatus,omitempty"` + PdbDetailsStatus []PDBDetailsStatus `json:"pdbDetailsStatus,omitempty"` } // DbcsSystemStatus defines the observed state of DbcsSystem @@ -156,17 +165,49 @@ type VmNetworkDetails struct { NetworkSG string `json:"networkSG,omitempty"` } +// DbCloneConfig defines the configuration for the database clone +type DbCloneConfig struct { + DbAdminPaswordSecret string `json:"dbAdminPaswordSecret,omitempty"` + TdeWalletPasswordSecret string `json:"tdeWalletPasswordSecret,omitempty"` + DbName string `json:"dbName"` + HostName string `json:"hostName"` + DbUniqueName string `json:"dbDbUniqueName"` + DisplayName string 
`json:"displayName"` + LicenseModel string `json:"licenseModel,omitempty"` + Domain string `json:"domain,omitempty"` + SshPublicKeys []string `json:"sshPublicKeys,omitempty"` + SubnetId string `json:"subnetId"` + SidPrefix string `json:"sidPrefix,omitempty"` + InitialDataStorageSizeInGB int `json:"initialDataStorageSizeInGB,omitempty"` + KmsKeyId string `json:"kmsKeyId,omitempty"` + KmsKeyVersionId string `json:"kmsKeyVersionId,omitempty"` + PrivateIp string `json:"privateIp,omitempty"` +} + +// DbCloneStatus defines the observed state of DbClone +type DbCloneStatus struct { + Id *string `json:"id,omitempty"` + DbAdminPaswordSecret string `json:"dbAdminPaswordSecret,omitempty"` + DbName string `json:"dbName,omitempty"` + HostName string `json:"hostName"` + DbUniqueName string `json:"dbDbUniqueName"` + DisplayName string `json:"displayName,omitempty"` + LicenseModel string `json:"licenseModel,omitempty"` + Domain string `json:"domain,omitempty"` + SshPublicKeys []string `json:"sshPublicKeys,omitempty"` + SubnetId string `json:"subnetId,omitempty"` +} + // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:path=DbcsSystem,scope=Namespaced +// +kubebuilder:resource:path=dbcssystems,scope=Namespaced // DbcsSystem is the Schema for the dbcssystems API type DbcsSystem struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec DbcsSystemSpec `json:"spec,omitempty"` - Status DbcsSystemStatus `json:"status,omitempty"` + Spec DbcsSystemSpec `json:"spec,omitempty"` + Status DbcsSystemStatus `json:"status,omitempty"` } //+kubebuilder:object:root=true @@ -208,6 +249,25 @@ func (dbcs *DbcsSystem) GetLastSuccessfulSpec() (*DbcsSystemSpec, error) { return &sucSpec, nil } +func (dbcs *DbcsSystem) GetLastSuccessfulSpecWithLog(log logr.Logger) (*DbcsSystemSpec, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulSpec] + if !ok { + log.Info("No last successful spec annotation found") + return nil, nil 
+ } + + specBytes := []byte(val) + sucSpec := DbcsSystemSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + log.Error(err, "Failed to unmarshal last successful spec") + return nil, err + } + + log.Info("Successfully retrieved last successful spec", "spec", sucSpec) + return &sucSpec, nil +} // UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. func (dbcs *DbcsSystem) UpdateLastSuccessfulSpec(kubeClient client.Client) error { diff --git a/apis/database/v1alpha1/dbcssystem_webhook.go b/apis/database/v1alpha1/dbcssystem_webhook.go new file mode 100644 index 00000000..dc9f8934 --- /dev/null +++ b/apis/database/v1alpha1/dbcssystem_webhook.go @@ -0,0 +1,98 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var dbcssystemlog = logf.Log.WithName("dbcssystem-resource") + +func (r *DbcsSystem) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-dbcssystem,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=dbcssystems,verbs=create;update,versions=v4,name=mdbcssystemv1alpha1.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DbcsSystem{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DbcsSystem) Default() { + dbcssystemlog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
+ +// +kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v4-dbcssystem,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=dbcssystems,versions=v4,name=vdbcssystemv1alpha1.kb.io,admissionReviewVersions=v1 +var _ webhook.Validator = &DbcsSystem{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateCreate() (admission.Warnings, error) { + dbcssystemlog.Info("validate create", "name", r.Name) + + // // TODO(user): fill in your validation logic upon object creation. + return nil, nil +} + +// // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + dbcssystemlog.Info("validate update", "name", r.Name) + + // // TODO(user): fill in your validation logic upon object update. + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateDelete() (admission.Warnings, error) { + dbcssystemlog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. 
+ return nil, nil +} diff --git a/apis/database/v1alpha1/oraclerestdataservice_conversion.go b/apis/database/v1alpha1/oraclerestdataservice_conversion.go new file mode 100644 index 00000000..a16e1ff6 --- /dev/null +++ b/apis/database/v1alpha1/oraclerestdataservice_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *OracleRestDataService) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *OracleRestDataService) ConvertFrom(src conversion.Hub) error { + return nil +} diff --git a/apis/database/v1alpha1/oraclerestdataservice_types.go b/apis/database/v1alpha1/oraclerestdataservice_types.go index 37f61a8a..bab04092 100644 --- a/apis/database/v1alpha1/oraclerestdataservice_types.go +++ b/apis/database/v1alpha1/oraclerestdataservice_types.go @@ -56,17 +56,18 @@ type OracleRestDataServiceSpec struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Image OracleRestDataServiceImage `json:"image,omitempty"` OrdsPassword OracleRestDataServicePassword `json:"ordsPassword"` - ApexPassword OracleRestDataServicePassword `json:"apexPassword,omitempty"` AdminPassword OracleRestDataServicePassword `json:"adminPassword"` OrdsUser string `json:"ordsUser,omitempty"` RestEnableSchemas []OracleRestDataServiceRestEnableSchemas `json:"restEnableSchemas,omitempty"` OracleService string `json:"oracleService,omitempty"` ServiceAccountName string `json:"serviceAccountName,omitempty"` Persistence OracleRestDataServicePersistence `json:"persistence,omitempty"` + MongoDbApi bool `json:"mongoDbApi,omitempty"` // +k8s:openapi-gen=true // +kubebuilder:validation:Minimum=1 - Replicas int `json:"replicas,omitempty"` + Replicas int `json:"replicas,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` } // OracleRestDataServicePersistence defines the storage releated params @@ -75,8 +76,9 @@ type OracleRestDataServicePersistence 
struct { StorageClass string `json:"storageClass,omitempty"` // +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany - AccessMode string `json:"accessMode,omitempty"` - VolumeName string `json:"volumeName,omitempty"` + AccessMode string `json:"accessMode,omitempty"` + VolumeName string `json:"volumeName,omitempty"` + SetWritePermissions *bool `json:"setWritePermissions,omitempty"` } // OracleRestDataServiceImage defines the Image source and pullSecrets for POD @@ -106,17 +108,19 @@ type OracleRestDataServiceRestEnableSchemas struct { type OracleRestDataServiceStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file - Status string `json:"status,omitempty"` - DatabaseApiUrl string `json:"databaseApiUrl,omitempty"` - LoadBalancer string `json:"loadBalancer,omitempty"` - DatabaseRef string `json:"databaseRef,omitempty"` - ServiceIP string `json:"serviceIP,omitempty"` - DatabaseActionsUrl string `json:"databaseActionsUrl,omitempty"` - OrdsInstalled bool `json:"ordsInstalled,omitempty"` - ApexConfigured bool `json:"apexConfigured,omitempty"` - ApxeUrl string `json:"apexUrl,omitempty"` - CommonUsersCreated bool `json:"commonUsersCreated,omitempty"` - Replicas int `json:"replicas,omitempty"` + Status string `json:"status,omitempty"` + DatabaseApiUrl string `json:"databaseApiUrl,omitempty"` + LoadBalancer string `json:"loadBalancer,omitempty"` + DatabaseRef string `json:"databaseRef,omitempty"` + ServiceIP string `json:"serviceIP,omitempty"` + DatabaseActionsUrl string `json:"databaseActionsUrl,omitempty"` + MongoDbApiAccessUrl string `json:"mongoDbApiAccessUrl,omitempty"` + OrdsInstalled bool `json:"ordsInstalled,omitempty"` + ApexConfigured bool `json:"apexConfigured,omitempty"` + ApxeUrl string `json:"apexUrl,omitempty"` + MongoDbApi bool `json:"mongoDbApi,omitempty"` + CommonUsersCreated bool `json:"commonUsersCreated,omitempty"` + Replicas int 
`json:"replicas,omitempty"` Image OracleRestDataServiceImage `json:"image,omitempty"` } @@ -128,6 +132,7 @@ type OracleRestDataServiceStatus struct { // +kubebuilder:printcolumn:JSONPath=".status.databaseApiUrl",name="Database API URL",type="string" // +kubebuilder:printcolumn:JSONPath=".status.databaseActionsUrl",name="Database Actions URL",type="string" // +kubebuilder:printcolumn:JSONPath=".status.apexUrl",name="Apex URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.mongoDbApiAccessUrl",name="MongoDbApi Access URL",type="string" // OracleRestDataService is the Schema for the oraclerestdataservices API type OracleRestDataService struct { diff --git a/apis/database/v1alpha1/oraclerestdataservice_webhook.go b/apis/database/v1alpha1/oraclerestdataservice_webhook.go index bfe3208c..c5ecde1c 100644 --- a/apis/database/v1alpha1/oraclerestdataservice_webhook.go +++ b/apis/database/v1alpha1/oraclerestdataservice_webhook.go @@ -74,9 +74,6 @@ func (r *OracleRestDataService) Default() { if r.Spec.OrdsPassword.KeepSecret == nil { r.Spec.OrdsPassword.KeepSecret = &keepSecret } - if r.Spec.ApexPassword.KeepSecret == nil { - r.Spec.ApexPassword.KeepSecret = &keepSecret - } if r.Spec.AdminPassword.KeepSecret == nil { r.Spec.AdminPassword.KeepSecret = &keepSecret } diff --git a/apis/database/v1alpha1/shardingdatabase_conversion.go b/apis/database/v1alpha1/shardingdatabase_conversion.go new file mode 100644 index 00000000..d8db75ca --- /dev/null +++ b/apis/database/v1alpha1/shardingdatabase_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *ShardingDatabase) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *ShardingDatabase) ConvertFrom(src conversion.Hub) error { + return nil +} diff --git a/apis/database/v1alpha1/shardingdatabase_types.go b/apis/database/v1alpha1/shardingdatabase_types.go index ffc17ab0..ae9066fc 100644 --- 
a/apis/database/v1alpha1/shardingdatabase_types.go +++ b/apis/database/v1alpha1/shardingdatabase_types.go @@ -68,7 +68,6 @@ type ShardingDatabaseSpec struct { GsmImagePullSecret string `json:"gsmImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. StagePvcName string `json:"stagePvcName,omitempty"` // the Stagepvc for the backup of cluster PortMappings []PortMapping `json:"portMappings,omitempty"` // Port mappings for the service that is created. The service is created if there is at least - Namespace string `json:"namespace,omitempty"` // Target namespace of the application. IsDebug bool `json:"isDebug,omitempty"` // Optional parameter to enable logining IsExternalSvc bool `json:"isExternalSvc,omitempty"` IsClone bool `json:"isClone,omitempty"` @@ -95,6 +94,7 @@ type ShardingDatabaseSpec struct { FssStorageClass string `json:"fssStorageClass,omitempty"` TdeWalletPvcMountLocation string `json:"tdeWalletPvcMountLocation,omitempty"` DbEdition string `json:"dbEdition,omitempty"` + TopicId string `json:"topicId,omitempty"` } // To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 @@ -215,6 +215,7 @@ type GsmSpec struct { Label string `json:"label,omitempty"` // Optional GSM Label IsDelete string `json:"isDelete,omitempty"` NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` Region string `json:"region,omitempty"` @@ -364,6 +365,7 @@ const ( var KubeConfigOnce sync.Once // #const lastSuccessfulSpec = "lastSuccessfulSpec" +const lastSuccessfulSpecOnsInfo = "lastSuccessfulSpeOnsInfo" // GetLastSuccessfulSpec returns spec from the lass successful reconciliation. // Returns nil, nil if there is no lastSuccessfulSpec. 
@@ -398,6 +400,27 @@ func (shardingv1 *ShardingDatabase) UpdateLastSuccessfulSpec(kubeClient client.C return annsv1.PatchAnnotations(kubeClient, shardingv1, anns) } +// GetLastSuccessfulOnsInfo returns spec from the lass successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulSpec. +func (shardingv1 *ShardingDatabase) GetLastSuccessfulOnsInfo() ([]byte, error) { + val, ok := shardingv1.GetAnnotations()[lastSuccessfulSpecOnsInfo] + if !ok { + return nil, nil + } + specBytes := []byte(val) + return specBytes, nil +} + +// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. +func (shardingv1 *ShardingDatabase) UpdateLastSuccessfulSpecOnsInfo(kubeClient client.Client, specBytes []byte) error { + + anns := map[string]string{ + lastSuccessfulSpecOnsInfo: string(specBytes), + } + + return annsv1.PatchAnnotations(kubeClient, shardingv1, anns) +} + func init() { SchemeBuilder.Register(&ShardingDatabase{}, &ShardingDatabaseList{}) } diff --git a/apis/database/v1alpha1/shardingdatabase_webhook.go b/apis/database/v1alpha1/shardingdatabase_webhook.go index 8b91fb0c..4e7ea2e7 100644 --- a/apis/database/v1alpha1/shardingdatabase_webhook.go +++ b/apis/database/v1alpha1/shardingdatabase_webhook.go @@ -62,7 +62,7 @@ func (r *ShardingDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
-//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-shardingdatabase,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=shardingdatabases,verbs=create;update,versions=v1alpha1,name=mshardingdatabase.kb.io,admissionReviewVersions={v1} +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-shardingdatabase,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=shardingdatabases,verbs=create;update,versions=v1alpha1,name=mshardingdatabasev1alpha1.kb.io,admissionReviewVersions=v1 var _ webhook.Defaulter = &ShardingDatabase{} @@ -87,7 +87,7 @@ func (r *ShardingDatabase) Default() { } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. -//+kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v1alpha1-shardingdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=shardingdatabases,versions=v1alpha1,name=vshardingdatabase.kb.io,admissionReviewVersions={v1} +//+kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v1alpha1-shardingdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=shardingdatabases,versions=v1alpha1,name=vshardingdatabasev1alpha1.kb.io,admissionReviewVersions=v1 var _ webhook.Validator = &ShardingDatabase{} @@ -177,6 +177,16 @@ func (r *ShardingDatabase) ValidateCreate() (admission.Warnings, error) { validationErr = append(validationErr, validationErrs1...) } + validationErrs1 = r.validateCatalogName() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateShardName() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + // TODO(user): fill in your validation logic upon object creation. 
if len(validationErr) == 0 { return nil, nil @@ -268,3 +278,37 @@ func (r *ShardingDatabase) validateFreeEdition() field.ErrorList { } return nil } + +func (r *ShardingDatabase) validateShardName() field.ErrorList { + var validationErrs field.ErrorList + + for pindex := range r.Spec.Shard { + if len(r.Spec.Shard[pindex].Name) > 9 { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("shard").Child("Name"), r.Spec.Shard[pindex].Name, + "Shard Name cannot be greater than 9 characters.")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateCatalogName() field.ErrorList { + var validationErrs field.ErrorList + + for pindex := range r.Spec.Catalog { + if len(r.Spec.Catalog[pindex].Name) > 9 { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("catalog").Child("Name"), r.Spec.Catalog[pindex].Name, + "Catalog Name cannot be greater than 9 characters.")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} diff --git a/apis/database/v1alpha1/singleinstancedatabase_conversion.go b/apis/database/v1alpha1/singleinstancedatabase_conversion.go new file mode 100644 index 00000000..76968dce --- /dev/null +++ b/apis/database/v1alpha1/singleinstancedatabase_conversion.go @@ -0,0 +1,14 @@ +package v1alpha1 + +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" +) + +func (src *SingleInstanceDatabase) ConvertTo(dst conversion.Hub) error { + return nil +} + +// ConvertFrom converts v1 to v1alpha1 +func (dst *SingleInstanceDatabase) ConvertFrom(src conversion.Hub) error { + return nil +} diff --git a/apis/database/v1alpha1/singleinstancedatabase_types.go b/apis/database/v1alpha1/singleinstancedatabase_types.go index 7c6c1ea5..36125d37 100644 --- a/apis/database/v1alpha1/singleinstancedatabase_types.go +++ b/apis/database/v1alpha1/singleinstancedatabase_types.go @@ -70,13 +70,13 @@ type 
SingleInstanceDatabaseSpec struct { EnableTCPS bool `json:"enableTCPS,omitempty"` TcpsCertRenewInterval string `json:"tcpsCertRenewInterval,omitempty"` TcpsTlsSecret string `json:"tcpsTlsSecret,omitempty"` - DgBrokerConfigured bool `json:"dgBrokerConfigured,omitempty"` PrimaryDatabaseRef string `json:"primaryDatabaseRef,omitempty"` - // +kubebuilder:validation:Enum=primary;standby;clone - CreateAs string `json:"createAs,omitempty"` - ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` - ServiceAccountName string `json:"serviceAccountName,omitempty"` + // +kubebuilder:validation:Enum=primary;standby;clone;truecache + CreateAs string `json:"createAs,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` + TrueCacheServices []string `json:"trueCacheServices,omitempty"` // +k8s:openapi-gen=true Replicas int `json:"replicas,omitempty"` @@ -87,6 +87,8 @@ type SingleInstanceDatabaseSpec struct { Persistence SingleInstanceDatabasePersistence `json:"persistence,omitempty"` InitParams *SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` Resources SingleInstanceDatabaseResources `json:"resources,omitempty"` + + ConvertToSnapshotStandby bool `json:"convertToSnapshotStandby,omitempty"` } type SingleInstanceDatabaseResource struct { @@ -145,6 +147,7 @@ type SingleInstanceDatabaseStatus struct { Status string `json:"status,omitempty"` Replicas int `json:"replicas,omitempty"` ReleaseUpdate string `json:"releaseUpdate,omitempty"` + DgBroker *string `json:"dgBroker,omitempty"` // +kubebuilder:default:="false" DatafilesPatched string `json:"datafilesPatched,omitempty"` ConnectString string `json:"connectString,omitempty"` @@ -175,7 +178,6 @@ type SingleInstanceDatabaseStatus struct { CertRenewInterval string `json:"certRenewInterval,omitempty"` ClientWalletLoc string `json:"clientWalletLoc,omitempty"` PrimaryDatabase string `json:"primaryDatabase,omitempty"` - 
DgBrokerConfigured bool `json:"dgBrokerConfigured,omitempty"` // +kubebuilder:default:="" TcpsTlsSecret string `json:"tcpsTlsSecret"` @@ -187,6 +189,8 @@ type SingleInstanceDatabaseStatus struct { InitParams SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` Persistence SingleInstanceDatabasePersistence `json:"persistence"` + + ConvertToSnapshotStandby bool `json:"convertToSnapshotStandby,omitempty"` } //+kubebuilder:object:root=true diff --git a/apis/database/v1alpha1/singleinstancedatabase_webhook.go b/apis/database/v1alpha1/singleinstancedatabase_webhook.go index 1a47207d..bc095f7c 100644 --- a/apis/database/v1alpha1/singleinstancedatabase_webhook.go +++ b/apis/database/v1alpha1/singleinstancedatabase_webhook.go @@ -135,6 +135,10 @@ func (r *SingleInstanceDatabase) Default() { r.Spec.Replicas = 1 } } + + if r.Spec.TrueCacheServices == nil { + r.Spec.TrueCacheServices = make([]string, 0) + } } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
@@ -293,6 +297,14 @@ func (r *SingleInstanceDatabase) ValidateCreate() (admission.Warnings, error) { } } + if r.Spec.CreateAs != "truecache" { + if len(r.Spec.TrueCacheServices) > 0 { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec").Child("trueCacheServices"), r.Spec.TrueCacheServices, + "Creation of trueCacheServices only supported with True Cache instances")) + } + } + if r.Status.FlashBack == "true" && r.Spec.FlashBack != nil && *r.Spec.FlashBack { if r.Spec.ArchiveLog != nil && !*r.Spec.ArchiveLog { allErrs = append(allErrs, @@ -470,7 +482,7 @@ func (r *SingleInstanceDatabase) ValidateUpdate(oldRuntimeObject runtime.Object) // if Db is in a dataguard configuration or referred by Standby databases then Restrict enabling Tcps on the Primary DB if r.Spec.EnableTCPS { - if old.Status.DgBrokerConfigured { + if old.Status.DgBroker != nil { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("enableTCPS"), "cannot enable tcps as database is in a dataguard configuration")) } else if len(old.Status.StandbyDatabases) != 0 { @@ -509,6 +521,12 @@ func (r *SingleInstanceDatabase) ValidateUpdate(oldRuntimeObject runtime.Object) allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("persistence"), "uninstall ORDS to change Persistence")) } + + if old.Status.Replicas != r.Spec.Replicas && old.Status.DgBroker != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("replicas"), "cannot be updated for a database in a Data Guard configuration")) + } + if len(allErrs) == 0 { return nil, nil } diff --git a/apis/database/v1alpha1/zz_generated.deepcopy.go b/apis/database/v1alpha1/zz_generated.deepcopy.go index 10b34ca7..d0426da8 100644 --- a/apis/database/v1alpha1/zz_generated.deepcopy.go +++ b/apis/database/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* ** Copyright (c) 2022 Oracle and/or its affiliates. 
@@ -50,18 +49,18 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ACDSpec) DeepCopyInto(out *ACDSpec) { +func (in *AcdSpec) DeepCopyInto(out *AcdSpec) { *out = *in - in.K8sACD.DeepCopyInto(&out.K8sACD) - in.OCIACD.DeepCopyInto(&out.OCIACD) + in.K8sAcd.DeepCopyInto(&out.K8sAcd) + in.OciAcd.DeepCopyInto(&out.OciAcd) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACDSpec. -func (in *ACDSpec) DeepCopy() *ACDSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcdSpec. +func (in *AcdSpec) DeepCopy() *AcdSpec { if in == nil { return nil } - out := new(ACDSpec) + out := new(AcdSpec) in.DeepCopyInto(out) return out } @@ -327,15 +326,10 @@ func (in *AutonomousDatabaseBackupStatus) DeepCopy() *AutonomousDatabaseBackupSt } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails) { +func (in *AutonomousDatabaseBase) DeepCopyInto(out *AutonomousDatabaseBase) { *out = *in - if in.AutonomousDatabaseOCID != nil { - in, out := &in.AutonomousDatabaseOCID, &out.AutonomousDatabaseOCID - *out = new(string) - **out = **in - } - if in.CompartmentOCID != nil { - in, out := &in.CompartmentOCID, &out.CompartmentOCID + if in.CompartmentId != nil { + in, out := &in.CompartmentId, &out.CompartmentId *out = new(string) **out = **in } @@ -360,11 +354,21 @@ func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails *out = new(int) **out = **in } - if in.CPUCoreCount != nil { - in, out := &in.CPUCoreCount, &out.CPUCoreCount + if in.CpuCoreCount != nil { + in, out := &in.CpuCoreCount, &out.CpuCoreCount *out = new(int) **out = **in } + if in.ComputeCount != nil { + in, out := &in.ComputeCount, &out.ComputeCount + *out = new(float32) + **out = **in + } + if in.OcpuCount != nil { + in, out := &in.OcpuCount, &out.OcpuCount + *out = new(float32) + **out = **in + } in.AdminPassword.DeepCopyInto(&out.AdminPassword) if in.IsAutoScalingEnabled != nil { in, out := &in.IsAutoScalingEnabled, &out.IsAutoScalingEnabled @@ -376,7 +380,41 @@ func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails *out = new(bool) **out = **in } - in.NetworkAccess.DeepCopyInto(&out.NetworkAccess) + if in.IsFreeTier != nil { + in, out := &in.IsFreeTier, &out.IsFreeTier + *out = new(bool) + **out = **in + } + if in.IsAccessControlEnabled != nil { + in, out := &in.IsAccessControlEnabled, &out.IsAccessControlEnabled + *out = new(bool) + **out = **in + } + if in.WhitelistedIps != nil { + in, out := &in.WhitelistedIps, &out.WhitelistedIps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SubnetId != nil { + in, out := &in.SubnetId, &out.SubnetId + *out = new(string) + **out = **in + } + if in.NsgIds != nil { + in, out := &in.NsgIds, &out.NsgIds + *out = 
make([]string, len(*in)) + copy(*out, *in) + } + if in.PrivateEndpointLabel != nil { + in, out := &in.PrivateEndpointLabel, &out.PrivateEndpointLabel + *out = new(string) + **out = **in + } + if in.IsMtlsConnectionRequired != nil { + in, out := &in.IsMtlsConnectionRequired, &out.IsMtlsConnectionRequired + *out = new(bool) + **out = **in + } if in.FreeformTags != nil { in, out := &in.FreeformTags, &out.FreeformTags *out = make(map[string]string, len(*in)) @@ -384,7 +422,43 @@ func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails (*out)[key] = val } } - in.Wallet.DeepCopyInto(&out.Wallet) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBase. +func (in *AutonomousDatabaseBase) DeepCopy() *AutonomousDatabaseBase { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseClone) DeepCopyInto(out *AutonomousDatabaseClone) { + *out = *in + in.AutonomousDatabaseBase.DeepCopyInto(&out.AutonomousDatabaseBase) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseClone. +func (in *AutonomousDatabaseClone) DeepCopy() *AutonomousDatabaseClone { + if in == nil { + return nil + } + out := new(AutonomousDatabaseClone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails) { + *out = *in + in.AutonomousDatabaseBase.DeepCopyInto(&out.AutonomousDatabaseBase) + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseDetails. 
@@ -525,7 +599,9 @@ func (in *AutonomousDatabaseRestoreStatus) DeepCopy() *AutonomousDatabaseRestore func (in *AutonomousDatabaseSpec) DeepCopyInto(out *AutonomousDatabaseSpec) { *out = *in in.Details.DeepCopyInto(&out.Details) - in.OCIConfig.DeepCopyInto(&out.OCIConfig) + in.Clone.DeepCopyInto(&out.Clone) + in.Wallet.DeepCopyInto(&out.Wallet) + in.OciConfig.DeepCopyInto(&out.OciConfig) if in.HardLink != nil { in, out := &in.HardLink, &out.HardLink *out = new(bool) @@ -607,205 +683,6 @@ func (in *Backupconfig) DeepCopy() *Backupconfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CDB) DeepCopyInto(out *CDB) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDB. -func (in *CDB) DeepCopy() *CDB { - if in == nil { - return nil - } - out := new(CDB) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CDB) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CDBAdminPassword) DeepCopyInto(out *CDBAdminPassword) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminPassword. -func (in *CDBAdminPassword) DeepCopy() *CDBAdminPassword { - if in == nil { - return nil - } - out := new(CDBAdminPassword) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CDBAdminUser) DeepCopyInto(out *CDBAdminUser) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminUser. -func (in *CDBAdminUser) DeepCopy() *CDBAdminUser { - if in == nil { - return nil - } - out := new(CDBAdminUser) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CDBList) DeepCopyInto(out *CDBList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CDB, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBList. -func (in *CDBList) DeepCopy() *CDBList { - if in == nil { - return nil - } - out := new(CDBList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CDBList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CDBSecret) DeepCopyInto(out *CDBSecret) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSecret. -func (in *CDBSecret) DeepCopy() *CDBSecret { - if in == nil { - return nil - } - out := new(CDBSecret) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CDBSpec) DeepCopyInto(out *CDBSpec) { - *out = *in - out.SysAdminPwd = in.SysAdminPwd - out.CDBAdminUser = in.CDBAdminUser - out.CDBAdminPwd = in.CDBAdminPwd - out.CDBTlsKey = in.CDBTlsKey - out.CDBTlsCrt = in.CDBTlsCrt - out.ORDSPwd = in.ORDSPwd - out.WebServerUser = in.WebServerUser - out.WebServerPwd = in.WebServerPwd - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSpec. -func (in *CDBSpec) DeepCopy() *CDBSpec { - if in == nil { - return nil - } - out := new(CDBSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CDBStatus) DeepCopyInto(out *CDBStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBStatus. -func (in *CDBStatus) DeepCopy() *CDBStatus { - if in == nil { - return nil - } - out := new(CDBStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CDBSysAdminPassword) DeepCopyInto(out *CDBSysAdminPassword) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSysAdminPassword. -func (in *CDBSysAdminPassword) DeepCopy() *CDBSysAdminPassword { - if in == nil { - return nil - } - out := new(CDBSysAdminPassword) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CDBTLSCRT) DeepCopyInto(out *CDBTLSCRT) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSCRT. -func (in *CDBTLSCRT) DeepCopy() *CDBTLSCRT { - if in == nil { - return nil - } - out := new(CDBTLSCRT) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CDBTLSKEY) DeepCopyInto(out *CDBTLSKEY) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSKEY. -func (in *CDBTLSKEY) DeepCopy() *CDBTLSKEY { - if in == nil { - return nil - } - out := new(CDBTLSKEY) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CatalogSpec) DeepCopyInto(out *CatalogSpec) { *out = *in @@ -898,7 +775,7 @@ func (in *DataguardBroker) DeepCopyInto(out *DataguardBroker) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBroker. @@ -919,26 +796,6 @@ func (in *DataguardBroker) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataguardBrokerFastStartFailOver) DeepCopyInto(out *DataguardBrokerFastStartFailOver) { - *out = *in - if in.Strategy != nil { - in, out := &in.Strategy, &out.Strategy - *out = make([]DataguardBrokerStrategy, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerFastStartFailOver. 
-func (in *DataguardBrokerFastStartFailOver) DeepCopy() *DataguardBrokerFastStartFailOver { - if in == nil { - return nil - } - out := new(DataguardBrokerFastStartFailOver) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataguardBrokerList) DeepCopyInto(out *DataguardBrokerList) { *out = *in @@ -993,7 +850,6 @@ func (in *DataguardBrokerSpec) DeepCopyInto(out *DataguardBrokerSpec) { (*out)[key] = val } } - in.FastStartFailOver.DeepCopyInto(&out.FastStartFailOver) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerSpec. @@ -1009,6 +865,13 @@ func (in *DataguardBrokerSpec) DeepCopy() *DataguardBrokerSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataguardBrokerStatus) DeepCopyInto(out *DataguardBrokerStatus) { *out = *in + if in.DatabasesInDataguardConfig != nil { + in, out := &in.DatabasesInDataguardConfig, &out.DatabasesInDataguardConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerStatus. @@ -1022,16 +885,46 @@ func (in *DataguardBrokerStatus) DeepCopy() *DataguardBrokerStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataguardBrokerStrategy) DeepCopyInto(out *DataguardBrokerStrategy) { +func (in *DbCloneConfig) DeepCopyInto(out *DbCloneConfig) { *out = *in + if in.SshPublicKeys != nil { + in, out := &in.SshPublicKeys, &out.SshPublicKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbCloneConfig. 
+func (in *DbCloneConfig) DeepCopy() *DbCloneConfig { + if in == nil { + return nil + } + out := new(DbCloneConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbCloneStatus) DeepCopyInto(out *DbCloneStatus) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + if in.SshPublicKeys != nil { + in, out := &in.SshPublicKeys, &out.SshPublicKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerStrategy. -func (in *DataguardBrokerStrategy) DeepCopy() *DataguardBrokerStrategy { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbCloneStatus. +func (in *DbCloneStatus) DeepCopy() *DbCloneStatus { if in == nil { return nil } - out := new(DataguardBrokerStrategy) + out := new(DbCloneStatus) in.DeepCopyInto(out) return out } @@ -1082,6 +975,7 @@ func (in *DbSystemDetails) DeepCopyInto(out *DbSystemDetails) { } } in.DbBackupConfig.DeepCopyInto(&out.DbBackupConfig) + out.KMSConfig = in.KMSConfig } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbSystemDetails. 
@@ -1187,6 +1081,39 @@ func (in *DbcsSystemSpec) DeepCopyInto(out *DbcsSystemSpec) { *out = new(string) **out = **in } + if in.OCIConfigMap != nil { + in, out := &in.OCIConfigMap, &out.OCIConfigMap + *out = new(string) + **out = **in + } + if in.OCISecret != nil { + in, out := &in.OCISecret, &out.OCISecret + *out = new(string) + **out = **in + } + if in.DbClone != nil { + in, out := &in.DbClone, &out.DbClone + *out = new(DbCloneConfig) + (*in).DeepCopyInto(*out) + } + if in.PdbConfigs != nil { + in, out := &in.PdbConfigs, &out.PdbConfigs + *out = make([]PDBConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DbBackupId != nil { + in, out := &in.DbBackupId, &out.DbBackupId + *out = new(string) + **out = **in + } + if in.DatabaseId != nil { + in, out := &in.DatabaseId, &out.DatabaseId + *out = new(string) + **out = **in + } + out.KMSConfig = in.KMSConfig } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemSpec. @@ -1242,6 +1169,15 @@ func (in *DbcsSystemStatus) DeepCopyInto(out *DbcsSystemStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + out.KMSDetailsStatus = in.KMSDetailsStatus + in.DbCloneStatus.DeepCopyInto(&out.DbCloneStatus) + if in.PdbDetailsStatus != nil { + in, out := &in.PdbDetailsStatus, &out.PdbDetailsStatus + *out = make([]PDBDetailsStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemStatus. 
@@ -1349,6 +1285,13 @@ func (in *GsmSpec) DeepCopyInto(out *GsmSpec) { (*out)[key] = val } } + if in.PvAnnotations != nil { + in, out := &in.PvAnnotations, &out.PvAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.PvMatchLabels != nil { in, out := &in.PvMatchLabels, &out.PvMatchLabels *out = make(map[string]string, len(*in)) @@ -1418,7 +1361,7 @@ func (in *GsmStatusDetails) DeepCopy() *GsmStatusDetails { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sACDSpec) DeepCopyInto(out *K8sACDSpec) { +func (in *K8sAcdSpec) DeepCopyInto(out *K8sAcdSpec) { *out = *in if in.Name != nil { in, out := &in.Name, &out.Name @@ -1427,18 +1370,18 @@ func (in *K8sACDSpec) DeepCopyInto(out *K8sACDSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sACDSpec. -func (in *K8sACDSpec) DeepCopy() *K8sACDSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAcdSpec. +func (in *K8sAcdSpec) DeepCopy() *K8sAcdSpec { if in == nil { return nil } - out := new(K8sACDSpec) + out := new(K8sAcdSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sADBBackupSpec) DeepCopyInto(out *K8sADBBackupSpec) { +func (in *K8sAdbBackupSpec) DeepCopyInto(out *K8sAdbBackupSpec) { *out = *in if in.Name != nil { in, out := &in.Name, &out.Name @@ -1447,18 +1390,18 @@ func (in *K8sADBBackupSpec) DeepCopyInto(out *K8sADBBackupSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sADBBackupSpec. -func (in *K8sADBBackupSpec) DeepCopy() *K8sADBBackupSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAdbBackupSpec. 
+func (in *K8sAdbBackupSpec) DeepCopy() *K8sAdbBackupSpec { if in == nil { return nil } - out := new(K8sADBBackupSpec) + out := new(K8sAdbBackupSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sADBSpec) DeepCopyInto(out *K8sADBSpec) { +func (in *K8sAdbSpec) DeepCopyInto(out *K8sAdbSpec) { *out = *in if in.Name != nil { in, out := &in.Name, &out.Name @@ -1467,12 +1410,12 @@ func (in *K8sADBSpec) DeepCopyInto(out *K8sADBSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sADBSpec. -func (in *K8sADBSpec) DeepCopy() *K8sADBSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAdbSpec. +func (in *K8sAdbSpec) DeepCopy() *K8sAdbSpec { if in == nil { return nil } - out := new(K8sADBSpec) + out := new(K8sAdbSpec) in.DeepCopyInto(out) return out } @@ -1498,78 +1441,77 @@ func (in *K8sSecretSpec) DeepCopy() *K8sSecretSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkAccessSpec) DeepCopyInto(out *NetworkAccessSpec) { +func (in *KMSConfig) DeepCopyInto(out *KMSConfig) { *out = *in - if in.IsAccessControlEnabled != nil { - in, out := &in.IsAccessControlEnabled, &out.IsAccessControlEnabled - *out = new(bool) - **out = **in - } - if in.AccessControlList != nil { - in, out := &in.AccessControlList, &out.AccessControlList - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.PrivateEndpoint.DeepCopyInto(&out.PrivateEndpoint) - if in.IsMTLSConnectionRequired != nil { - in, out := &in.IsMTLSConnectionRequired, &out.IsMTLSConnectionRequired - *out = new(bool) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfig. 
+func (in *KMSConfig) DeepCopy() *KMSConfig { + if in == nil { + return nil } + out := new(KMSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSDetailsStatus) DeepCopyInto(out *KMSDetailsStatus) { + *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAccessSpec. -func (in *NetworkAccessSpec) DeepCopy() *NetworkAccessSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSDetailsStatus. +func (in *KMSDetailsStatus) DeepCopy() *KMSDetailsStatus { if in == nil { return nil } - out := new(NetworkAccessSpec) + out := new(KMSDetailsStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OCIACDSpec) DeepCopyInto(out *OCIACDSpec) { +func (in *OciAcdSpec) DeepCopyInto(out *OciAcdSpec) { *out = *in - if in.OCID != nil { - in, out := &in.OCID, &out.OCID + if in.Id != nil { + in, out := &in.Id, &out.Id *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIACDSpec. -func (in *OCIACDSpec) DeepCopy() *OCIACDSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciAcdSpec. +func (in *OciAcdSpec) DeepCopy() *OciAcdSpec { if in == nil { return nil } - out := new(OCIACDSpec) + out := new(OciAcdSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OCIADBSpec) DeepCopyInto(out *OCIADBSpec) { +func (in *OciAdbSpec) DeepCopyInto(out *OciAdbSpec) { *out = *in - if in.OCID != nil { - in, out := &in.OCID, &out.OCID + if in.Ocid != nil { + in, out := &in.Ocid, &out.Ocid *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIADBSpec. -func (in *OCIADBSpec) DeepCopy() *OCIADBSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciAdbSpec. +func (in *OciAdbSpec) DeepCopy() *OciAdbSpec { if in == nil { return nil } - out := new(OCIADBSpec) + out := new(OciAdbSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OCIConfigSpec) DeepCopyInto(out *OCIConfigSpec) { +func (in *OciConfigSpec) DeepCopyInto(out *OciConfigSpec) { *out = *in if in.ConfigMapName != nil { in, out := &in.ConfigMapName, &out.ConfigMapName @@ -1583,48 +1525,32 @@ func (in *OCIConfigSpec) DeepCopyInto(out *OCIConfigSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIConfigSpec. -func (in *OCIConfigSpec) DeepCopy() *OCIConfigSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciConfigSpec. +func (in *OciConfigSpec) DeepCopy() *OciConfigSpec { if in == nil { return nil } - out := new(OCIConfigSpec) + out := new(OciConfigSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OCISecretSpec) DeepCopyInto(out *OCISecretSpec) { +func (in *OciSecretSpec) DeepCopyInto(out *OciSecretSpec) { *out = *in - if in.OCID != nil { - in, out := &in.OCID, &out.OCID + if in.Id != nil { + in, out := &in.Id, &out.Id *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCISecretSpec. -func (in *OCISecretSpec) DeepCopy() *OCISecretSpec { - if in == nil { - return nil - } - out := new(OCISecretSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ORDSPassword) DeepCopyInto(out *ORDSPassword) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ORDSPassword. -func (in *ORDSPassword) DeepCopy() *ORDSPassword { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciSecretSpec. +func (in *OciSecretSpec) DeepCopy() *OciSecretSpec { if in == nil { return nil } - out := new(ORDSPassword) + out := new(OciSecretSpec) in.DeepCopyInto(out) return out } @@ -1726,6 +1652,11 @@ func (in *OracleRestDataServicePassword) DeepCopy() *OracleRestDataServicePasswo // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OracleRestDataServicePersistence) DeepCopyInto(out *OracleRestDataServicePersistence) { *out = *in + if in.SetWritePermissions != nil { + in, out := &in.SetWritePermissions, &out.SetWritePermissions + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServicePersistence. 
@@ -1772,14 +1703,13 @@ func (in *OracleRestDataServiceSpec) DeepCopyInto(out *OracleRestDataServiceSpec } out.Image = in.Image in.OrdsPassword.DeepCopyInto(&out.OrdsPassword) - in.ApexPassword.DeepCopyInto(&out.ApexPassword) in.AdminPassword.DeepCopyInto(&out.AdminPassword) if in.RestEnableSchemas != nil { in, out := &in.RestEnableSchemas, &out.RestEnableSchemas *out = make([]OracleRestDataServiceRestEnableSchemas, len(*in)) copy(*out, *in) } - out.Persistence = in.Persistence + in.Persistence.DeepCopyInto(&out.Persistence) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceSpec. @@ -1809,230 +1739,135 @@ func (in *OracleRestDataServiceStatus) DeepCopy() *OracleRestDataServiceStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PDB) DeepCopyInto(out *PDB) { +func (in *PDBConfig) DeepCopyInto(out *PDBConfig) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDB. -func (in *PDB) DeepCopy() *PDB { - if in == nil { - return nil + if in.PdbName != nil { + in, out := &in.PdbName, &out.PdbName + *out = new(string) + **out = **in } - out := new(PDB) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PDB) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c + if in.PdbAdminPassword != nil { + in, out := &in.PdbAdminPassword, &out.PdbAdminPassword + *out = new(string) + **out = **in } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PDBAdminName) DeepCopyInto(out *PDBAdminName) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminName. -func (in *PDBAdminName) DeepCopy() *PDBAdminName { - if in == nil { - return nil + if in.TdeWalletPassword != nil { + in, out := &in.TdeWalletPassword, &out.TdeWalletPassword + *out = new(string) + **out = **in } - out := new(PDBAdminName) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PDBAdminPassword) DeepCopyInto(out *PDBAdminPassword) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminPassword. -func (in *PDBAdminPassword) DeepCopy() *PDBAdminPassword { - if in == nil { - return nil + if in.ShouldPdbAdminAccountBeLocked != nil { + in, out := &in.ShouldPdbAdminAccountBeLocked, &out.ShouldPdbAdminAccountBeLocked + *out = new(bool) + **out = **in } - out := new(PDBAdminPassword) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PDBList) DeepCopyInto(out *PDBList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PDB, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBList. 
-func (in *PDBList) DeepCopy() *PDBList { - if in == nil { - return nil + if in.IsDelete != nil { + in, out := &in.IsDelete, &out.IsDelete + *out = new(bool) + **out = **in } - out := new(PDBList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PDBList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c + if in.PluggableDatabaseId != nil { + in, out := &in.PluggableDatabaseId, &out.PluggableDatabaseId + *out = new(string) + **out = **in } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PDBSecret) DeepCopyInto(out *PDBSecret) { - *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSecret. -func (in *PDBSecret) DeepCopy() *PDBSecret { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBConfig. +func (in *PDBConfig) DeepCopy() *PDBConfig { if in == nil { return nil } - out := new(PDBSecret) + out := new(PDBConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PDBSpec) DeepCopyInto(out *PDBSpec) { +func (in *PDBConfigStatus) DeepCopyInto(out *PDBConfigStatus) { *out = *in - out.PDBTlsKey = in.PDBTlsKey - out.PDBTlsCrt = in.PDBTlsCrt - out.PDBTlsCat = in.PDBTlsCat - out.AdminName = in.AdminName - out.AdminPwd = in.AdminPwd - out.WebServerUsr = in.WebServerUsr - out.WebServerPwd = in.WebServerPwd - if in.ReuseTempFile != nil { - in, out := &in.ReuseTempFile, &out.ReuseTempFile - *out = new(bool) - **out = **in - } - if in.UnlimitedStorage != nil { - in, out := &in.UnlimitedStorage, &out.UnlimitedStorage - *out = new(bool) - **out = **in - } - if in.AsClone != nil { - in, out := &in.AsClone, &out.AsClone - *out = new(bool) + if in.PdbName != nil { + in, out := &in.PdbName, &out.PdbName + *out = new(string) **out = **in } - if in.TDEImport != nil { - in, out := &in.TDEImport, &out.TDEImport + if in.ShouldPdbAdminAccountBeLocked != nil { + in, out := &in.ShouldPdbAdminAccountBeLocked, &out.ShouldPdbAdminAccountBeLocked *out = new(bool) **out = **in } - if in.TDEExport != nil { - in, out := &in.TDEExport, &out.TDEExport - *out = new(bool) - **out = **in + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } - out.TDEPassword = in.TDEPassword - out.TDESecret = in.TDESecret - if in.GetScript != nil { - in, out := &in.GetScript, &out.GetScript - *out = new(bool) + if in.PluggableDatabaseId != nil { + in, out := &in.PluggableDatabaseId, &out.PluggableDatabaseId + *out = new(string) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSpec. -func (in *PDBSpec) DeepCopy() *PDBSpec { - if in == nil { - return nil - } - out := new(PDBSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PDBStatus) DeepCopyInto(out *PDBStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBStatus. -func (in *PDBStatus) DeepCopy() *PDBStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBConfigStatus. +func (in *PDBConfigStatus) DeepCopy() *PDBConfigStatus { if in == nil { return nil } - out := new(PDBStatus) + out := new(PDBConfigStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PDBTLSCAT) DeepCopyInto(out *PDBTLSCAT) { +func (in *PDBDetailsStatus) DeepCopyInto(out *PDBDetailsStatus) { *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCAT. -func (in *PDBTLSCAT) DeepCopy() *PDBTLSCAT { - if in == nil { - return nil + if in.PDBConfigStatus != nil { + in, out := &in.PDBConfigStatus, &out.PDBConfigStatus + *out = make([]PDBConfigStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } - out := new(PDBTLSCAT) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PDBTLSCRT) DeepCopyInto(out *PDBTLSCRT) { - *out = *in - out.Secret = in.Secret } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCRT. -func (in *PDBTLSCRT) DeepCopy() *PDBTLSCRT { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBDetailsStatus. +func (in *PDBDetailsStatus) DeepCopy() *PDBDetailsStatus { if in == nil { return nil } - out := new(PDBTLSCRT) + out := new(PDBDetailsStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PDBTLSKEY) DeepCopyInto(out *PDBTLSKEY) { +func (in *PasswordSpec) DeepCopyInto(out *PasswordSpec) { *out = *in - out.Secret = in.Secret + in.K8sSecret.DeepCopyInto(&out.K8sSecret) + in.OciSecret.DeepCopyInto(&out.OciSecret) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSKEY. -func (in *PDBTLSKEY) DeepCopy() *PDBTLSKEY { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordSpec. +func (in *PasswordSpec) DeepCopy() *PasswordSpec { if in == nil { return nil } - out := new(PDBTLSKEY) + out := new(PasswordSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PITSpec) DeepCopyInto(out *PITSpec) { +func (in *PitSpec) DeepCopyInto(out *PitSpec) { *out = *in if in.Timestamp != nil { in, out := &in.Timestamp, &out.Timestamp @@ -2041,29 +1876,12 @@ func (in *PITSpec) DeepCopyInto(out *PITSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PITSpec. -func (in *PITSpec) DeepCopy() *PITSpec { - if in == nil { - return nil - } - out := new(PITSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PasswordSpec) DeepCopyInto(out *PasswordSpec) { - *out = *in - in.K8sSecret.DeepCopyInto(&out.K8sSecret) - in.OCISecret.DeepCopyInto(&out.OCISecret) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordSpec. -func (in *PasswordSpec) DeepCopy() *PasswordSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PitSpec. 
+func (in *PitSpec) DeepCopy() *PitSpec { if in == nil { return nil } - out := new(PasswordSpec) + out := new(PitSpec) in.DeepCopyInto(out) return out } @@ -2083,36 +1901,6 @@ func (in *PortMapping) DeepCopy() *PortMapping { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrivateEndpointSpec) DeepCopyInto(out *PrivateEndpointSpec) { - *out = *in - if in.SubnetOCID != nil { - in, out := &in.SubnetOCID, &out.SubnetOCID - *out = new(string) - **out = **in - } - if in.NsgOCIDs != nil { - in, out := &in.NsgOCIDs, &out.NsgOCIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.HostnamePrefix != nil { - in, out := &in.HostnamePrefix, &out.HostnamePrefix - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointSpec. -func (in *PrivateEndpointSpec) DeepCopy() *PrivateEndpointSpec { - if in == nil { - return nil - } - out := new(PrivateEndpointSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SecretDetails) DeepCopyInto(out *SecretDetails) { *out = *in @@ -2535,6 +2323,11 @@ func (in *SingleInstanceDatabaseSpec) DeepCopyInto(out *SingleInstanceDatabaseSp *out = new(bool) **out = **in } + if in.TrueCacheServices != nil { + in, out := &in.TrueCacheServices, &out.TrueCacheServices + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = make(map[string]string, len(*in)) @@ -2571,6 +2364,11 @@ func (in *SingleInstanceDatabaseStatus) DeepCopyInto(out *SingleInstanceDatabase *out = make([]string, len(*in)) copy(*out, *in) } + if in.DgBroker != nil { + in, out := &in.DgBroker, &out.DgBroker + *out = new(string) + **out = **in + } if in.StandbyDatabases != nil { in, out := &in.StandbyDatabases, &out.StandbyDatabases *out = make(map[string]string, len(*in)) @@ -2602,7 +2400,7 @@ func (in *SingleInstanceDatabaseStatus) DeepCopy() *SingleInstanceDatabaseStatus // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SourceSpec) DeepCopyInto(out *SourceSpec) { *out = *in - in.K8sADBBackup.DeepCopyInto(&out.K8sADBBackup) + in.K8sAdbBackup.DeepCopyInto(&out.K8sAdbBackup) in.PointInTime.DeepCopyInto(&out.PointInTime) } @@ -2616,43 +2414,11 @@ func (in *SourceSpec) DeepCopy() *SourceSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TDEPwd) DeepCopyInto(out *TDEPwd) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDEPwd. -func (in *TDEPwd) DeepCopy() *TDEPwd { - if in == nil { - return nil - } - out := new(TDEPwd) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TDESecret) DeepCopyInto(out *TDESecret) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDESecret. -func (in *TDESecret) DeepCopy() *TDESecret { - if in == nil { - return nil - } - out := new(TDESecret) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TargetSpec) DeepCopyInto(out *TargetSpec) { *out = *in - in.K8sADB.DeepCopyInto(&out.K8sADB) - in.OCIADB.DeepCopyInto(&out.OCIADB) + in.K8sAdb.DeepCopyInto(&out.K8sAdb) + in.OciAdb.DeepCopyInto(&out.OciAdb) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSpec. @@ -2720,67 +2486,3 @@ func (in *WalletSpec) DeepCopy() *WalletSpec { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebServerPassword) DeepCopyInto(out *WebServerPassword) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPassword. -func (in *WebServerPassword) DeepCopy() *WebServerPassword { - if in == nil { - return nil - } - out := new(WebServerPassword) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebServerPasswordPDB) DeepCopyInto(out *WebServerPasswordPDB) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPasswordPDB. 
-func (in *WebServerPasswordPDB) DeepCopy() *WebServerPasswordPDB { - if in == nil { - return nil - } - out := new(WebServerPasswordPDB) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebServerUser) DeepCopyInto(out *WebServerUser) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUser. -func (in *WebServerUser) DeepCopy() *WebServerUser { - if in == nil { - return nil - } - out := new(WebServerUser) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebServerUserPDB) DeepCopyInto(out *WebServerUserPDB) { - *out = *in - out.Secret = in.Secret -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUserPDB. -func (in *WebServerUserPDB) DeepCopy() *WebServerUserPDB { - if in == nil { - return nil - } - out := new(WebServerUserPDB) - in.DeepCopyInto(out) - return out -} diff --git a/apis/database/v4/adbfamily_common_spec.go b/apis/database/v4/adbfamily_common_spec.go new file mode 100644 index 00000000..87434852 --- /dev/null +++ b/apis/database/v4/adbfamily_common_spec.go @@ -0,0 +1,67 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +// LastSuccessfulSpec is an annotation key which maps to the value of last successful spec +const LastSuccessfulSpec string = "lastSuccessfulSpec" + +/************************ +* OCI config +************************/ +type OciConfigSpec struct { + ConfigMapName *string `json:"configMapName,omitempty"` + SecretName *string `json:"secretName,omitempty"` +} + +/************************ +* ADB spec +************************/ +type K8sAdbSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciAdbSpec struct { + OCID *string `json:"ocid,omitempty"` +} + +// TargetSpec defines the spec of the target for backup/restore runs. +type TargetSpec struct { + K8sAdb K8sAdbSpec `json:"k8sADB,omitempty"` + OciAdb OciAdbSpec `json:"ociADB,omitempty"` +} diff --git a/apis/database/v1alpha1/adbfamily_common_utils.go b/apis/database/v4/adbfamily_utils.go similarity index 87% rename from apis/database/v1alpha1/adbfamily_common_utils.go rename to apis/database/v4/adbfamily_utils.go index d4d3ae9f..380dab35 100644 --- a/apis/database/v1alpha1/adbfamily_common_utils.go +++ b/apis/database/v4/adbfamily_utils.go @@ -36,61 +36,20 @@ ** SOFTWARE. 
*/ -package v1alpha1 +package v4 import ( "errors" "reflect" "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/database" "github.com/oracle/oci-go-sdk/v65/workrequests" ) -// LastSuccessfulSpec is an annotation key which maps to the value of last successful spec -const LastSuccessfulSpec string = "lastSuccessfulSpec" - -// File the meta condition and return the meta view -func CreateMetaCondition(obj client.Object, err error, lifecycleState string, stateMsg string) metav1.Condition { - - return metav1.Condition{ - Type: lifecycleState, - LastTransitionTime: metav1.Now(), - ObservedGeneration: obj.GetGeneration(), - Reason: stateMsg, - Message: err.Error(), - Status: metav1.ConditionTrue, - } -} - -/************************ -* OCI config -************************/ -type OCIConfigSpec struct { - ConfigMapName *string `json:"configMapName,omitempty"` - SecretName *string `json:"secretName,omitempty"` -} - -/************************ -* ADB spec -************************/ -type K8sADBSpec struct { - Name *string `json:"name,omitempty"` -} - -type OCIADBSpec struct { - OCID *string `json:"ocid,omitempty"` -} - -// TargetSpec defines the spec of the target for backup/restore runs. -type TargetSpec struct { - K8sADB K8sADBSpec `json:"k8sADB,omitempty"` - OCIADB OCIADBSpec `json:"ociADB,omitempty"` -} +// This file contains the util functions that are shared by specs in both +// apis/database/v1alpha1 and apis/database/v4. /************************** * Remove Unchanged Fields @@ -99,7 +58,7 @@ type TargetSpec struct { // removeUnchangedFields removes the unchanged fields in the struct and returns if the struct is changed. // lastSpec should be a derefereced struct that is the last successful spec, e.g. AutonomousDatabaseSpec. // curSpec should be a pointer pointing to the struct that is being proccessed, e.g., *AutonomousDatabaseSpec. 
-func removeUnchangedFields(lastSpec interface{}, curSpec interface{}) (bool, error) { +func RemoveUnchangedFields(lastSpec interface{}, curSpec interface{}) (bool, error) { if reflect.ValueOf(lastSpec).Kind() != reflect.Struct { return false, errors.New("lastSpec should be a struct") } @@ -217,7 +176,7 @@ func FormatSDKTime(sdkTime *common.SDKTime) string { return time.Format(displayFormat) } -func parseDisplayTime(val string) (*common.SDKTime, error) { +func ParseDisplayTime(val string) (*common.SDKTime, error) { parsedTime, err := time.Parse(displayFormat, val) if err != nil { return nil, err @@ -229,7 +188,7 @@ func parseDisplayTime(val string) (*common.SDKTime, error) { /************************ * LifecycleState check ************************/ -func IsADBIntermediateState(state database.AutonomousDatabaseLifecycleStateEnum) bool { +func IsAdbIntermediateState(state database.AutonomousDatabaseLifecycleStateEnum) bool { if state == database.AutonomousDatabaseLifecycleStateProvisioning || state == database.AutonomousDatabaseLifecycleStateUpdating || state == database.AutonomousDatabaseLifecycleStateScaleInProgress || @@ -248,7 +207,7 @@ func IsADBIntermediateState(state database.AutonomousDatabaseLifecycleStateEnum) return false } -func ValidADBTerminateState(state database.AutonomousDatabaseLifecycleStateEnum) bool { +func CanBeTerminated(state database.AutonomousDatabaseLifecycleStateEnum) bool { if state == database.AutonomousDatabaseLifecycleStateProvisioning || state == database.AutonomousDatabaseLifecycleStateAvailable || state == database.AutonomousDatabaseLifecycleStateStopped || diff --git a/apis/database/v4/autonomouscontainerdatabase_types.go b/apis/database/v4/autonomouscontainerdatabase_types.go new file mode 100644 index 00000000..be9cc615 --- /dev/null +++ b/apis/database/v4/autonomouscontainerdatabase_types.go @@ -0,0 +1,226 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + "encoding/json" + "reflect" + + "github.com/oracle/oci-go-sdk/v65/database" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// name of our custom finalizer +const ACDFinalizer = "database.oracle.com/acd-finalizer" + +type AcdActionEnum string + +const ( + AcdActionBlank AcdActionEnum = "" + AcdActionRestart AcdActionEnum = "RESTART" + AcdActionTerminate AcdActionEnum = "TERMINATE" +) + +func GetAcdActionEnumFromString(val string) (AcdActionEnum, bool) { + var mappingAcdActionEnum = map[string]AcdActionEnum{ + "RESTART": AcdActionRestart, + "TERMINATE": AcdActionTerminate, + "": AcdActionBlank, + } + + enum, ok := mappingAcdActionEnum[val] + return enum, ok +} + +// AutonomousContainerDatabaseSpec defines the desired state of AutonomousContainerDatabase +type AutonomousContainerDatabaseSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + AutonomousContainerDatabaseOCID *string `json:"autonomousContainerDatabaseOCID,omitempty"` + CompartmentOCID *string `json:"compartmentOCID,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + AutonomousExadataVMClusterOCID *string `json:"autonomousExadataVMClusterOCID,omitempty"` + // +kubebuilder:validation:Enum:="RELEASE_UPDATES";"RELEASE_UPDATE_REVISIONS" + PatchModel database.AutonomousContainerDatabasePatchModelEnum `json:"patchModel,omitempty"` + // +kubebuilder:validation:Enum:="SYNC";"RESTART";"TERMINATE" + Action AcdActionEnum `json:"action,omitempty"` + FreeformTags map[string]string `json:"freeformTags,omitempty"` + + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` + // +kubebuilder:default:=false + HardLink *bool `json:"hardLink,omitempty"` +} + +// AutonomousContainerDatabaseStatus defines the observed 
state of AutonomousContainerDatabase +type AutonomousContainerDatabaseStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + LifecycleState database.AutonomousContainerDatabaseLifecycleStateEnum `json:"lifecycleState"` + TimeCreated string `json:"timeCreated,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="acd";"acds" +// +kubebuilder:printcolumn:JSONPath=".spec.displayName",name="DisplayName",type=string +// +kubebuilder:printcolumn:JSONPath=".status.lifecycleState",name="State",type=string +// +kubebuilder:printcolumn:JSONPath=".status.timeCreated",name="Created",type=string +// +kubebuilder:storageversion + +// AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases API +type AutonomousContainerDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutonomousContainerDatabaseSpec `json:"spec,omitempty"` + Status AutonomousContainerDatabaseStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AutonomousContainerDatabaseList contains a list of AutonomousContainerDatabase +type AutonomousContainerDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousContainerDatabase `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousContainerDatabase{}, &AutonomousContainerDatabaseList{}) +} + +// Implement conversion.Hub interface, which means any resource version can convert into v4 +func (*AutonomousContainerDatabase) Hub() {} + +// GetLastSuccessfulSpec returns spec from the lass successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulSpec. 
+func (acd *AutonomousContainerDatabase) GetLastSuccessfulSpec() (*AutonomousContainerDatabaseSpec, error) { + val, ok := acd.GetAnnotations()[LastSuccessfulSpec] + if !ok { + return nil, nil + } + + specBytes := []byte(val) + sucSpec := AutonomousContainerDatabaseSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + return nil, err + } + + return &sucSpec, nil +} + +func (acd *AutonomousContainerDatabase) UpdateLastSuccessfulSpec() error { + specBytes, err := json.Marshal(acd.Spec) + if err != nil { + return err + } + + anns := acd.GetAnnotations() + + if anns == nil { + anns = map[string]string{ + LastSuccessfulSpec: string(specBytes), + } + } else { + anns[LastSuccessfulSpec] = string(specBytes) + } + + acd.SetAnnotations(anns) + + return nil +} + +// UpdateStatusFromOCIACD updates the status subresource +func (acd *AutonomousContainerDatabase) UpdateStatusFromOCIACD(ociObj database.AutonomousContainerDatabase) { + acd.Status.LifecycleState = ociObj.LifecycleState + acd.Status.TimeCreated = FormatSDKTime(ociObj.TimeCreated) +} + +// UpdateFromOCIADB updates the attributes using database.AutonomousContainerDatabase object +func (acd *AutonomousContainerDatabase) UpdateFromOCIACD(ociObj database.AutonomousContainerDatabase) (specChanged bool) { + oldACD := acd.DeepCopy() + + /*********************************** + * update the spec + ***********************************/ + acd.Spec.Action = AcdActionBlank + acd.Spec.AutonomousContainerDatabaseOCID = ociObj.Id + acd.Spec.CompartmentOCID = ociObj.CompartmentId + acd.Spec.DisplayName = ociObj.DisplayName + acd.Spec.AutonomousExadataVMClusterOCID = ociObj.CloudAutonomousVmClusterId + acd.Spec.PatchModel = ociObj.PatchModel + + // special case: an emtpy map will be nil after unmarshalling while the OCI always returns an emty map. 
+ if len(ociObj.FreeformTags) != 0 { + acd.Spec.FreeformTags = ociObj.FreeformTags + } else { + acd.Spec.FreeformTags = nil + } + + /*********************************** + * update the status subresource + ***********************************/ + acd.UpdateStatusFromOCIACD(ociObj) + + return !reflect.DeepEqual(oldACD.Spec, acd.Spec) +} + +// RemoveUnchangedSpec removes the unchanged fields in spec, and returns if the spec has been changed. +func (acd *AutonomousContainerDatabase) RemoveUnchangedSpec(prevSpec AutonomousContainerDatabaseSpec) (bool, error) { + changed, err := RemoveUnchangedFields(prevSpec, &acd.Spec) + if err != nil { + return changed, err + } + + return changed, nil +} + +// A helper function which is useful for debugging. The function prints out a structural JSON format. +func (acd *AutonomousContainerDatabase) String() (string, error) { + out, err := json.MarshalIndent(acd, "", " ") + if err != nil { + return "", err + } + return string(out), nil +} diff --git a/apis/database/v4/autonomouscontainerdatabase_webhook.go b/apis/database/v4/autonomouscontainerdatabase_webhook.go new file mode 100644 index 00000000..9fcb9d8b --- /dev/null +++ b/apis/database/v4/autonomouscontainerdatabase_webhook.go @@ -0,0 +1,110 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomouscontainerdatabaselog = logf.Log.WithName("autonomouscontainerdatabase-resource") + +func (r *AutonomousContainerDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v4-autonomouscontainerdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomouscontainerdatabases,versions=v4,name=vautonomouscontainerdatabasev4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AutonomousContainerDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousContainerDatabase) ValidateCreate() (admission.Warnings, error) { + autonomouscontainerdatabaselog.Info("validate create", "name", r.Name) + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousContainerDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + var oldACD *AutonomousContainerDatabase = old.(*AutonomousContainerDatabase) + + autonomouscontainerdatabaselog.Info("validate update", "name", r.Name) + + // skip the update of adding ADB OCID or binding + if oldACD.Status.LifecycleState == "" { + return nil, nil + } + + // cannot update when the old state is in intermediate state, except for the terminate operatrion + var copiedSpec *AutonomousContainerDatabaseSpec = 
r.Spec.DeepCopy() + changed, err := RemoveUnchangedFields(oldACD.Spec, copiedSpec) + if err != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec"), err.Error())) + } + if IsACDIntermediateState(oldACD.Status.LifecycleState) && changed { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec"), + "cannot change the spec when the lifecycleState is in an intermdeiate state")) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousContainerDatabase"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousContainerDatabase) ValidateDelete() (admission.Warnings, error) { + autonomouscontainerdatabaselog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/autonomousdatabase_types.go b/apis/database/v4/autonomousdatabase_types.go new file mode 100644 index 00000000..628dd882 --- /dev/null +++ b/apis/database/v4/autonomousdatabase_types.go @@ -0,0 +1,393 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + "encoding/json" + "reflect" + + "github.com/oracle/oci-go-sdk/v65/database" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// AutonomousDatabaseSpec defines the desired state of AutonomousDatabase +// Important: Run "make" to regenerate code after modifying this file +type AutonomousDatabaseSpec struct { + // +kubebuilder:validation:Enum:="";Create;Sync;Update;Stop;Start;Terminate;Clone + Action string `json:"action"` + Details AutonomousDatabaseDetails `json:"details,omitempty"` + Clone AutonomousDatabaseClone `json:"clone,omitempty"` + Wallet WalletSpec `json:"wallet,omitempty"` + OciConfig OciConfigSpec `json:"ociConfig,omitempty"` + // +kubebuilder:default:=false + HardLink *bool `json:"hardLink,omitempty"` +} + +type AutonomousDatabaseDetails struct { + AutonomousDatabaseBase `json:",inline"` + Id *string `json:"id,omitempty"` +} + +type AutonomousDatabaseClone struct { + AutonomousDatabaseBase `json:",inline"` + // +kubebuilder:validation:Enum:="FULL";"METADATA" + CloneType database.CreateAutonomousDatabaseCloneDetailsCloneTypeEnum `json:"cloneType,omitempty"` +} + +// AutonomousDatabaseBase defines the detail information of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase +type AutonomousDatabaseBase struct { + CompartmentId *string `json:"compartmentId,omitempty"` + AutonomousContainerDatabase AcdSpec `json:"autonomousContainerDatabase,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + DbName *string `json:"dbName,omitempty"` + // +kubebuilder:validation:Enum:="OLTP";"DW";"AJD";"APEX" + DbWorkload database.AutonomousDatabaseDbWorkloadEnum `json:"dbWorkload,omitempty"` + // +kubebuilder:validation:Enum:="LICENSE_INCLUDED";"BRING_YOUR_OWN_LICENSE" + LicenseModel database.AutonomousDatabaseLicenseModelEnum 
`json:"licenseModel,omitempty"` + DbVersion *string `json:"dbVersion,omitempty"` + DataStorageSizeInTBs *int `json:"dataStorageSizeInTBs,omitempty"` + CpuCoreCount *int `json:"cpuCoreCount,omitempty"` + // +kubebuilder:validation:Enum:="ECPU";"OCPU" + ComputeModel database.AutonomousDatabaseComputeModelEnum `json:"computeModel,omitempty"` + ComputeCount *float32 `json:"computeCount,omitempty"` + OcpuCount *float32 `json:"ocpuCount,omitempty"` + AdminPassword PasswordSpec `json:"adminPassword,omitempty"` + IsAutoScalingEnabled *bool `json:"isAutoScalingEnabled,omitempty"` + IsDedicated *bool `json:"isDedicated,omitempty"` + IsFreeTier *bool `json:"isFreeTier,omitempty"` + + // NetworkAccess + IsAccessControlEnabled *bool `json:"isAccessControlEnabled,omitempty"` + WhitelistedIps []string `json:"whitelistedIps,omitempty"` + SubnetId *string `json:"subnetId,omitempty"` + NsgIds []string `json:"nsgIds,omitempty"` + PrivateEndpointLabel *string `json:"privateEndpointLabel,omitempty"` + IsMtlsConnectionRequired *bool `json:"isMtlsConnectionRequired,omitempty"` + + FreeformTags map[string]string `json:"freeformTags,omitempty"` +} + +/************************ +* ACD specs +************************/ +type K8sAcdSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciAcdSpec struct { + Id *string `json:"id,omitempty"` +} + +// AcdSpec defines the spec of the target for backup/restore runs. 
+// The name could be the name of an AutonomousDatabase or an AutonomousDatabaseBackup +type AcdSpec struct { + K8sAcd K8sAcdSpec `json:"k8sAcd,omitempty"` + OciAcd OciAcdSpec `json:"ociAcd,omitempty"` +} + +/************************ +* Secret specs +************************/ +type K8sSecretSpec struct { + Name *string `json:"name,omitempty"` +} + +type OciSecretSpec struct { + Id *string `json:"id,omitempty"` +} + +type PasswordSpec struct { + K8sSecret K8sSecretSpec `json:"k8sSecret,omitempty"` + OciSecret OciSecretSpec `json:"ociSecret,omitempty"` +} + +type WalletSpec struct { + Name *string `json:"name,omitempty"` + Password PasswordSpec `json:"password,omitempty"` +} + +// AutonomousDatabaseStatus defines the observed state of AutonomousDatabase +type AutonomousDatabaseStatus struct { + // Lifecycle State of the ADB + LifecycleState database.AutonomousDatabaseLifecycleStateEnum `json:"lifecycleState,omitempty"` + // Creation time of the ADB + TimeCreated string `json:"timeCreated,omitempty"` + // Expiring date of the instance wallet + WalletExpiringDate string `json:"walletExpiringDate,omitempty"` + // Connection Strings of the ADB + AllConnectionStrings []ConnectionStringProfile `json:"allConnectionStrings,omitempty"` + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +type TLSAuthenticationEnum string + +const ( + tlsAuthenticationTLS TLSAuthenticationEnum = "TLS" + tlsAuthenticationMTLS TLSAuthenticationEnum = "Mutual TLS" +) + +func GetTLSAuthenticationEnumFromString(val string) (TLSAuthenticationEnum, bool) { + var mappingTLSAuthenticationEnum = map[string]TLSAuthenticationEnum{ + "TLS": tlsAuthenticationTLS, + "Mutual TLS": tlsAuthenticationMTLS, + } + + enum, ok := mappingTLSAuthenticationEnum[val] + return enum, ok +} + +type ConnectionStringProfile struct { + TLSAuthentication TLSAuthenticationEnum `json:"tlsAuthentication,omitempty"` + 
ConnectionStrings []ConnectionStringSpec `json:"connectionStrings"` +} + +type ConnectionStringSpec struct { + TNSName string `json:"tnsName,omitempty"` + ConnectionString string `json:"connectionString,omitempty"` +} + +// AutonomousDatabase is the Schema for the autonomousdatabases API +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName="adb";"adbs" +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.details.displayName",name="Display Name",type=string +// +kubebuilder:printcolumn:JSONPath=".spec.details.dbName",name="Db Name",type=string +// +kubebuilder:printcolumn:JSONPath=".status.lifecycleState",name="State",type=string +// +kubebuilder:printcolumn:JSONPath=".spec.details.isDedicated",name="Dedicated",type=string +// +kubebuilder:printcolumn:JSONPath=".spec.details.cpuCoreCount",name="OCPUs",type=integer +// +kubebuilder:printcolumn:JSONPath=".spec.details.dataStorageSizeInTBs",name="Storage (TB)",type=integer +// +kubebuilder:printcolumn:JSONPath=".spec.details.dbWorkload",name="Workload Type",type=string +// +kubebuilder:printcolumn:JSONPath=".status.timeCreated",name="Created",type=string +// +kubebuilder:storageversion +type AutonomousDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutonomousDatabaseSpec `json:"spec,omitempty"` + Status AutonomousDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AutonomousDatabaseList contains a list of AutonomousDatabase +type AutonomousDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousDatabase `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousDatabase{}, &AutonomousDatabaseList{}) +} + +// Implement conversion.Hub interface, which means any resource version can convert into v4 +func (*AutonomousDatabase) Hub() {} + +// UpdateStatusFromOCIADB updates the status subresource +func (adb 
*AutonomousDatabase) UpdateStatusFromOciAdb(ociObj database.AutonomousDatabase) { + adb.Status.LifecycleState = ociObj.LifecycleState + adb.Status.TimeCreated = FormatSDKTime(ociObj.TimeCreated) + + if *ociObj.IsDedicated { + conns := make([]ConnectionStringSpec, len(ociObj.ConnectionStrings.AllConnectionStrings)) + for key, val := range ociObj.ConnectionStrings.AllConnectionStrings { + conns = append(conns, ConnectionStringSpec{TNSName: key, ConnectionString: val}) + } + + adb.Status.AllConnectionStrings = []ConnectionStringProfile{ + {ConnectionStrings: conns}, + } + } else { + var mTLSConns []ConnectionStringSpec + var tlsConns []ConnectionStringSpec + + var conns []ConnectionStringProfile + + for _, profile := range ociObj.ConnectionStrings.Profiles { + if profile.TlsAuthentication == database.DatabaseConnectionStringProfileTlsAuthenticationMutual { + mTLSConns = append(mTLSConns, ConnectionStringSpec{TNSName: *profile.DisplayName, ConnectionString: *profile.Value}) + } else { + tlsConns = append(tlsConns, ConnectionStringSpec{TNSName: *profile.DisplayName, ConnectionString: *profile.Value}) + } + } + + if len(mTLSConns) > 0 { + conns = append(conns, ConnectionStringProfile{ + TLSAuthentication: tlsAuthenticationMTLS, + ConnectionStrings: mTLSConns, + }) + } + + if len(tlsConns) > 0 { + conns = append(conns, ConnectionStringProfile{ + TLSAuthentication: tlsAuthenticationTLS, + ConnectionStrings: tlsConns, + }) + } + + adb.Status.AllConnectionStrings = conns + } +} + +// UpdateFromOciAdb updates the attributes using database.AutonomousDatabase object +func (adb *AutonomousDatabase) UpdateFromOciAdb(ociObj database.AutonomousDatabase, overwrite bool) (specChanged bool) { + oldADB := adb.DeepCopy() + + /*********************************** + * update the spec + ***********************************/ + if overwrite || adb.Spec.Details.Id == nil { + adb.Spec.Details.Id = ociObj.Id + } + if overwrite || adb.Spec.Details.CompartmentId == nil { + 
adb.Spec.Details.CompartmentId = ociObj.CompartmentId + } + if overwrite || adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id == nil { + adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id = ociObj.AutonomousContainerDatabaseId + } + if overwrite || adb.Spec.Details.DisplayName == nil { + adb.Spec.Details.DisplayName = ociObj.DisplayName + } + if overwrite || adb.Spec.Details.DbName == nil { + adb.Spec.Details.DbName = ociObj.DbName + } + if overwrite || adb.Spec.Details.DbWorkload == "" { + adb.Spec.Details.DbWorkload = ociObj.DbWorkload + } + if overwrite || adb.Spec.Details.LicenseModel == "" { + adb.Spec.Details.LicenseModel = ociObj.LicenseModel + } + if overwrite || adb.Spec.Details.DbVersion == nil { + adb.Spec.Details.DbVersion = ociObj.DbVersion + } + if overwrite || adb.Spec.Details.DataStorageSizeInTBs == nil { + adb.Spec.Details.DataStorageSizeInTBs = ociObj.DataStorageSizeInTBs + } + if overwrite || adb.Spec.Details.CpuCoreCount == nil { + adb.Spec.Details.CpuCoreCount = ociObj.CpuCoreCount + } + if overwrite || adb.Spec.Details.ComputeModel == "" { + adb.Spec.Details.ComputeModel = ociObj.ComputeModel + } + if overwrite || adb.Spec.Details.OcpuCount == nil { + adb.Spec.Details.OcpuCount = ociObj.OcpuCount + } + if overwrite || adb.Spec.Details.ComputeCount == nil { + adb.Spec.Details.ComputeCount = ociObj.ComputeCount + } + if overwrite || adb.Spec.Details.IsAutoScalingEnabled == nil { + adb.Spec.Details.IsAutoScalingEnabled = ociObj.IsAutoScalingEnabled + } + if overwrite || adb.Spec.Details.IsDedicated == nil { + adb.Spec.Details.IsDedicated = ociObj.IsDedicated + } + if overwrite || adb.Spec.Details.IsFreeTier == nil { + adb.Spec.Details.IsFreeTier = ociObj.IsFreeTier + } + if overwrite || adb.Spec.Details.FreeformTags == nil { + // Special case: an empty map will be nil after unmarshalling while the OCI always returns an empty map. 
+ if len(ociObj.FreeformTags) != 0 { + adb.Spec.Details.FreeformTags = ociObj.FreeformTags + } else { + adb.Spec.Details.FreeformTags = nil + } + } + + if overwrite || adb.Spec.Details.IsAccessControlEnabled == nil { + adb.Spec.Details.IsAccessControlEnabled = ociObj.IsAccessControlEnabled + } + + if overwrite || adb.Spec.Details.WhitelistedIps == nil { + if len(ociObj.WhitelistedIps) != 0 { + adb.Spec.Details.WhitelistedIps = ociObj.WhitelistedIps + } else { + adb.Spec.Details.WhitelistedIps = nil + } + } + if overwrite || adb.Spec.Details.IsMtlsConnectionRequired == nil { + adb.Spec.Details.IsMtlsConnectionRequired = ociObj.IsMtlsConnectionRequired + } + if overwrite || adb.Spec.Details.SubnetId == nil { + adb.Spec.Details.SubnetId = ociObj.SubnetId + } + if overwrite || adb.Spec.Details.NsgIds == nil { + if len(ociObj.NsgIds) != 0 { + adb.Spec.Details.NsgIds = ociObj.NsgIds + } else { + adb.Spec.Details.NsgIds = nil + } + } + if overwrite || adb.Spec.Details.PrivateEndpointLabel == nil { + adb.Spec.Details.PrivateEndpointLabel = ociObj.PrivateEndpointLabel + } + + /*********************************** + * update the status subresource + ***********************************/ + adb.UpdateStatusFromOciAdb(ociObj) + + return !reflect.DeepEqual(oldADB.Spec, adb.Spec) +} + +// RemoveUnchangedDetails removes the unchanged fields in spec.details, and returns if the details has been changed. +func (adb *AutonomousDatabase) RemoveUnchangedDetails(prevSpec AutonomousDatabaseSpec) (bool, error) { + + changed, err := RemoveUnchangedFields(prevSpec.Details, &adb.Spec.Details) + if err != nil { + return changed, err + } + + return changed, nil +} + +// A helper function which is useful for debugging. The function prints out a structural JSON format. 
+func (adb *AutonomousDatabase) String() (string, error) { + out, err := json.MarshalIndent(adb, "", " ") + if err != nil { + return "", err + } + return string(out), nil +} diff --git a/apis/database/v4/autonomousdatabase_webhook.go b/apis/database/v4/autonomousdatabase_webhook.go new file mode 100644 index 00000000..f7eb60aa --- /dev/null +++ b/apis/database/v4/autonomousdatabase_webhook.go @@ -0,0 +1,170 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomousdatabaselog = logf.Log.WithName("autonomousdatabase-resource") + +func (r *AutonomousDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v4-autonomousdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabases,versions=v4,name=vautonomousdatabasev4.kb.io,admissionReviewVersions=v1 +var _ webhook.Validator = &AutonomousDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +// ValidateCreate checks if the spec is valid for a provisioning or a binding operation +func (r *AutonomousDatabase) ValidateCreate() (admission.Warnings, error) { + var allErrs field.ErrorList + + autonomousdatabaselog.Info("validate create", "name", r.Name) + + namespaces := dbcommons.GetWatchNamespaces() + _, hasEmptyString := namespaces[""] + isClusterScoped := len(namespaces) == 1 && hasEmptyString + if !isClusterScoped { + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabase"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + var oldADB *AutonomousDatabase = old.(*AutonomousDatabase) + + autonomousdatabaselog.Info("validate update", "name", r.Name) + + // skip the verification of adding ADB OCID or binding + // if oldADB.Status.LifecycleState == "" { + // return nil, nil + // } + + // cannot update when the old state is in intermediate, except for the change to the hardLink or the 
terminate operation during valid lifecycleState + // var copySpec *AutonomousDatabaseSpec = r.Spec.DeepCopy() + // specChanged, err := RemoveUnchangedFields(oldADB.Spec, copySpec) + // if err != nil { + // allErrs = append(allErrs, + // field.Forbidden(field.NewPath("spec"), err.Error())) + // } + + // hardLinkChanged := copySpec.HardLink != nil + + // isTerminateOp := CanBeTerminated(oldADB.Status.LifecycleState) && copySpec.Action == "Terminate" + + // if specChanged && IsAdbIntermediateState(oldADB.Status.LifecycleState) && !isTerminateOp && !hardLinkChanged { + // allErrs = append(allErrs, + // field.Forbidden(field.NewPath("spec"), + // "cannot change the spec when the lifecycleState is in an intermediate state")) + // } + + // cannot modify autonomousDatabaseOCID + if r.Spec.Details.Id != nil && + oldADB.Spec.Details.Id != nil && + *r.Spec.Details.Id != *oldADB.Spec.Details.Id { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("details").Child("autonomousDatabaseOCID"), + "autonomousDatabaseOCID cannot be modified")) + } + + allErrs = validateCommon(r, allErrs) + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabase"}, + r.Name, allErrs) + } + +func validateCommon(adb *AutonomousDatabase, allErrs field.ErrorList) field.ErrorList { + // password + if adb.Spec.Details.AdminPassword.K8sSecret.Name != nil && adb.Spec.Details.AdminPassword.OciSecret.Id != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("details").Child("adminPassword"), + "cannot apply k8sSecret.name and ociSecret.ocid at the same time")) + } + + if adb.Spec.Wallet.Password.K8sSecret.Name != nil && adb.Spec.Wallet.Password.OciSecret.Id != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("details").Child("wallet").Child("password"), + "cannot apply k8sSecret.name and ociSecret.ocid at the same time")) + } + + 
return allErrs +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabase) ValidateDelete() (admission.Warnings, error) { + autonomousdatabaselog.Info("validate delete", "name", r.Name) + return nil, nil +} + +// Returns true if AutonomousContainerDatabaseOCID has value. +// We don't use Details.IsDedicated because the parameter might be null when it's a provision operation. +func isDedicated(adb *AutonomousDatabase) bool { + return adb.Spec.Details.AutonomousContainerDatabase.K8sAcd.Name != nil || + adb.Spec.Details.AutonomousContainerDatabase.OciAcd.Id != nil +} diff --git a/apis/database/v4/autonomousdatabasebackup_types.go b/apis/database/v4/autonomousdatabasebackup_types.go new file mode 100644 index 00000000..925256c0 --- /dev/null +++ b/apis/database/v4/autonomousdatabasebackup_types.go @@ -0,0 +1,129 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and 
have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// AutonomousDatabaseBackupSpec defines the desired state of AutonomousDatabaseBackup +type AutonomousDatabaseBackupSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + Target TargetSpec `json:"target,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + AutonomousDatabaseBackupOCID *string `json:"autonomousDatabaseBackupOCID,omitempty"` + IsLongTermBackup *bool `json:"isLongTermBackup,omitempty"` + RetentionPeriodInDays *int `json:"retentionPeriodInDays,omitempty"` + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` +} + +// AutonomousDatabaseBackupStatus defines the observed state of AutonomousDatabaseBackup +type AutonomousDatabaseBackupStatus struct { + LifecycleState database.AutonomousDatabaseBackupLifecycleStateEnum `json:"lifecycleState"` + Type database.AutonomousDatabaseBackupTypeEnum `json:"type"` + IsAutomatic bool `json:"isAutomatic"` + TimeStarted string `json:"timeStarted,omitempty"` + TimeEnded string `json:"timeEnded,omitempty"` + AutonomousDatabaseOCID string `json:"autonomousDatabaseOCID"` + CompartmentOCID string `json:"compartmentOCID"` + DBName string `json:"dbName"` + DBDisplayName string `json:"dbDisplayName"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="adbbu";"adbbus" +//+kubebuilder:printcolumn:JSONPath=".status.lifecycleState",name="State",type=string +//+kubebuilder:printcolumn:JSONPath=".status.dbDisplayName",name="DB DisplayName",type=string +//+kubebuilder:printcolumn:JSONPath=".status.type",name="Type",type=string +//+kubebuilder:printcolumn:JSONPath=".status.timeStarted",name="Started",type=string +//+kubebuilder:printcolumn:JSONPath=".status.timeEnded",name="Ended",type=string +// +kubebuilder:storageversion + +// AutonomousDatabaseBackup is the Schema for the autonomousdatabasebackups API +type AutonomousDatabaseBackup struct { + metav1.TypeMeta 
`json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutonomousDatabaseBackupSpec `json:"spec,omitempty"` + Status AutonomousDatabaseBackupStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AutonomousDatabaseBackupList contains a list of AutonomousDatabaseBackup +type AutonomousDatabaseBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousDatabaseBackup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousDatabaseBackup{}, &AutonomousDatabaseBackupList{}) +} + +// Implement conversion.Hub interface, which means any resource version can convert into v4 +func (*AutonomousDatabaseBackup) Hub() {} + +func (b *AutonomousDatabaseBackup) UpdateStatusFromOCIBackup(ociBackup database.AutonomousDatabaseBackup, ociADB database.AutonomousDatabase) { + b.Status.AutonomousDatabaseOCID = *ociBackup.AutonomousDatabaseId + b.Status.CompartmentOCID = *ociBackup.CompartmentId + b.Status.Type = ociBackup.Type + b.Status.IsAutomatic = *ociBackup.IsAutomatic + + b.Status.LifecycleState = ociBackup.LifecycleState + + b.Status.TimeStarted = FormatSDKTime(ociBackup.TimeStarted) + b.Status.TimeEnded = FormatSDKTime(ociBackup.TimeEnded) + + b.Status.DBDisplayName = *ociADB.DisplayName + b.Status.DBName = *ociADB.DbName +} + +// GetTimeEnded returns the status.timeEnded in SDKTime format +func (b *AutonomousDatabaseBackup) GetTimeEnded() (*common.SDKTime, error) { + return ParseDisplayTime(b.Status.TimeEnded) +} diff --git a/apis/database/v4/autonomousdatabasebackup_webhook.go b/apis/database/v4/autonomousdatabasebackup_webhook.go new file mode 100644 index 00000000..7858adce --- /dev/null +++ b/apis/database/v4/autonomousdatabasebackup_webhook.go @@ -0,0 +1,158 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomousdatabasebackuplog = logf.Log.WithName("autonomousdatabasebackup-resource") + +func (r *AutonomousDatabaseBackup) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-autonomousdatabasebackup,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,verbs=create;update,versions=v4,name=mautonomousdatabasebackupv4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &AutonomousDatabaseBackup{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) Default() { + autonomousdatabasebackuplog.Info("default", "name", r.Name) +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v4-autonomousdatabasebackup,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabasebackups,versions=v4,name=vautonomousdatabasebackupv4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AutonomousDatabaseBackup{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) ValidateCreate() (admission.Warnings, error) { + autonomousdatabasebackuplog.Info("validate create", "name", r.Name) + + var allErrs field.ErrorList + + namespaces := 
dbcommons.GetWatchNamespaces() + _, hasEmptyString := namespaces[""] + isClusterScoped := len(namespaces) == 1 && hasEmptyString + if !isClusterScoped { + _, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + if len(namespaces) != 0 && !containsNamespace { + allErrs = append(allErrs, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + } + + if r.Spec.Target.K8sAdb.Name == nil && r.Spec.Target.OciAdb.OCID == nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "target ADB is empty")) + } + + if r.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.OciAdb.OCID != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target"), "specify either k8sADB or ociADB, but not both")) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseBackup"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + autonomousdatabasebackuplog.Info("validate update", "name", r.Name) + + var allErrs field.ErrorList + oldBackup := old.(*AutonomousDatabaseBackup) + + if oldBackup.Spec.AutonomousDatabaseBackupOCID != nil && r.Spec.AutonomousDatabaseBackupOCID != nil && + *oldBackup.Spec.AutonomousDatabaseBackupOCID != *r.Spec.AutonomousDatabaseBackupOCID { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("autonomousDatabaseBackupOCID"), + "cannot assign a new autonomousDatabaseBackupOCID to this backup")) + } + + if oldBackup.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.K8sAdb.Name != nil && + *oldBackup.Spec.Target.K8sAdb.Name != *r.Spec.Target.K8sAdb.Name { + allErrs = 
append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target").Child("k8sADB").Child("name"), "cannot assign a new name to the target")) + } + + if oldBackup.Spec.Target.OciAdb.OCID != nil && r.Spec.Target.OciAdb.OCID != nil && + *oldBackup.Spec.Target.OciAdb.OCID != *r.Spec.Target.OciAdb.OCID { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("target").Child("ociADB").Child("ocid"), "cannot assign a new ocid to the target")) + } + + if oldBackup.Spec.DisplayName != nil && r.Spec.DisplayName != nil && + *oldBackup.Spec.DisplayName != *r.Spec.DisplayName { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("displayName"), "cannot assign a new displayName to this backup")) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseBackup"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseBackup) ValidateDelete() (admission.Warnings, error) { + autonomousdatabasebackuplog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/autonomousdatabaserestore_types.go b/apis/database/v4/autonomousdatabaserestore_types.go new file mode 100644 index 00000000..3337c983 --- /dev/null +++ b/apis/database/v4/autonomousdatabaserestore_types.go @@ -0,0 +1,142 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + "errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/oracle/oci-go-sdk/v65/common" + "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/workrequests" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +type K8sADBBackupSpec struct { + Name *string `json:"name,omitempty"` +} + +type PITSpec struct { + // The timestamp must follow this format: YYYY-MM-DD HH:MM:SS GMT + Timestamp *string `json:"timestamp,omitempty"` +} + +type SourceSpec struct { + K8sAdbBackup K8sADBBackupSpec `json:"k8sADBBackup,omitempty"` + PointInTime PITSpec `json:"pointInTime,omitempty"` +} + +// AutonomousDatabaseRestoreSpec defines the desired state of AutonomousDatabaseRestore +type AutonomousDatabaseRestoreSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + Target TargetSpec `json:"target"` + Source SourceSpec `json:"source"` + OCIConfig OciConfigSpec `json:"ociConfig,omitempty"` +} + +// AutonomousDatabaseRestoreStatus defines the observed state of AutonomousDatabaseRestore +type AutonomousDatabaseRestoreStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + DisplayName string `json:"displayName"` + TimeAccepted string `json:"timeAccepted,omitempty"` + TimeStarted string `json:"timeStarted,omitempty"` + TimeEnded string `json:"timeEnded,omitempty"` + DbName string `json:"dbName"` + WorkRequestOCID string `json:"workRequestOCID"` + Status workrequests.WorkRequestStatusEnum `json:"status"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:resource:shortName="adbr";"adbrs" +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type=string 
+// +kubebuilder:printcolumn:JSONPath=".status.displayName",name="DbDisplayName",type=string +// +kubebuilder:printcolumn:JSONPath=".status.dbName",name="DbName",type=string +// +kubebuilder:storageversion + +// AutonomousDatabaseRestore is the Schema for the autonomousdatabaserestores API +type AutonomousDatabaseRestore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AutonomousDatabaseRestoreSpec `json:"spec,omitempty"` + Status AutonomousDatabaseRestoreStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AutonomousDatabaseRestoreList contains a list of AutonomousDatabaseRestore +type AutonomousDatabaseRestoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AutonomousDatabaseRestore `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AutonomousDatabaseRestore{}, &AutonomousDatabaseRestoreList{}) +} + +// Implement conversion.Hub interface, which means any resource version can convert into v4 +func (*AutonomousDatabaseRestore) Hub() {} + +// GetPIT returns the spec.pointInTime.timeStamp in SDKTime format +func (r *AutonomousDatabaseRestore) GetPIT() (*common.SDKTime, error) { + if r.Spec.Source.PointInTime.Timestamp == nil { + return nil, errors.New("the timestamp is empty") + } + return ParseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) +} + +func (r *AutonomousDatabaseRestore) UpdateStatus( + adb database.AutonomousDatabase, + workResp workrequests.GetWorkRequestResponse) { + + r.Status.DisplayName = *adb.DisplayName + r.Status.DbName = *adb.DbName + + r.Status.WorkRequestOCID = *workResp.Id + r.Status.Status = workResp.Status + r.Status.TimeAccepted = FormatSDKTime(workResp.TimeAccepted) + r.Status.TimeStarted = FormatSDKTime(workResp.TimeStarted) + r.Status.TimeEnded = FormatSDKTime(workResp.TimeFinished) +} diff --git a/apis/database/v4/autonomousdatabaserestore_webhook.go 
b/apis/database/v4/autonomousdatabaserestore_webhook.go new file mode 100644 index 00000000..6e3b4656 --- /dev/null +++ b/apis/database/v4/autonomousdatabaserestore_webhook.go @@ -0,0 +1,146 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var autonomousdatabaserestorelog = logf.Log.WithName("autonomousdatabaserestore-resource") + +func (r *AutonomousDatabaseRestore) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+	Complete()
+}
+
+//+kubebuilder:webhook:verbs=create;update,path=/validate-database-oracle-com-v4-autonomousdatabaserestore,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=autonomousdatabaserestores,versions=v4,name=vautonomousdatabaserestorev4.kb.io,admissionReviewVersions=v1
+
+var _ webhook.Validator = &AutonomousDatabaseRestore{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (r *AutonomousDatabaseRestore) ValidateCreate() (admission.Warnings, error) {
+	autonomousdatabaserestorelog.Info("validate create", "name", r.Name)
+
+	var allErrs field.ErrorList
+
+	namespaces := dbcommons.GetWatchNamespaces()
+	_, hasEmptyString := namespaces[""]
+	isClusterScoped := len(namespaces) == 1 && hasEmptyString
+	if !isClusterScoped {
+		_, containsNamespace := namespaces[r.Namespace]
+		// Check if the allowed namespaces maps contains the required namespace
+		if len(namespaces) != 0 && !containsNamespace {
+			allErrs = append(allErrs,
+				field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace,
+					"Oracle database operator doesn't watch over this namespace"))
+		}
+	}
+
+	// Validate the target ADB
+	if r.Spec.Target.K8sAdb.Name == nil && r.Spec.Target.OciAdb.OCID == nil {
+		allErrs = append(allErrs,
+			field.Forbidden(field.NewPath("spec").Child("target"), "target ADB is empty"))
+	}
+
+	if r.Spec.Target.K8sAdb.Name != nil && r.Spec.Target.OciAdb.OCID != nil {
+		allErrs = append(allErrs,
+			field.Forbidden(field.NewPath("spec").Child("target"), "specify either k8sADB.name or ociADB.ocid, but not both"))
+	}
+
+	// Validate the restore source
+	if r.Spec.Source.K8sAdbBackup.Name == nil &&
+		r.Spec.Source.PointInTime.Timestamp == nil {
+		allErrs = append(allErrs,
+			field.Forbidden(field.NewPath("spec").Child("source"), "restore source is empty"))
+	}
+
+	if r.Spec.Source.K8sAdbBackup.Name != nil &&
+		r.Spec.Source.PointInTime.Timestamp != nil {
+		allErrs = append(allErrs,
+
field.Forbidden(field.NewPath("spec").Child("source"), "cannot apply backupName and the PITR parameters at the same time")) + } + + // Verify the timestamp format if it's PITR + if r.Spec.Source.PointInTime.Timestamp != nil { + _, err := ParseDisplayTime(*r.Spec.Source.PointInTime.Timestamp) + if err != nil { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("source").Child("pointInTime").Child("timestamp"), "invalid timestamp format")) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseRestore"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseRestore) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + autonomousdatabaserestorelog.Info("validate update", "name", r.Name) + + var allErrs field.ErrorList + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "AutonomousDatabaseRestore"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AutonomousDatabaseRestore) ValidateDelete() (admission.Warnings, error) { + autonomousdatabaserestorelog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v1alpha1/cdb_types.go b/apis/database/v4/cdb_types.go similarity index 93% rename from apis/database/v1alpha1/cdb_types.go rename to apis/database/v4/cdb_types.go index 206781b2..ce3f6f28 100644 --- a/apis/database/v1alpha1/cdb_types.go +++ b/apis/database/v4/cdb_types.go @@ -36,7 +36,7 @@ ** SOFTWARE. 
*/ -package v1alpha1 +package v4 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -84,8 +84,11 @@ type CDBSpec struct { // DB server port DBPort int `json:"dbPort,omitempty"` // Node Selector for running the Pod - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - DBTnsurl string `json:"dbTnsurl,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + DeletePDBCascade bool `json:"deletePdbCascade,omitempty"` + DBTnsurl string `json:"dbTnsurl,omitempty"` + CDBPubKey CDBPUBKEY `json:"cdbOrdsPubKey,omitempty"` + CDBPriKey CDBPRIVKEY `json:"cdbOrdsPrvKey,omitempty"` } // CDBSecret defines the secretName @@ -132,6 +135,14 @@ type CDBTLSCRT struct { Secret CDBSecret `json:"secret"` } +type CDBPUBKEY struct { + Secret CDBSecret `json:"secret"` +} + +type CDBPRIVKEY struct { + Secret CDBSecret `json:"secret"` +} + // CDBStatus defines the observed state of CDB type CDBStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster @@ -150,11 +161,12 @@ type CDBStatus struct { // +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB" // +kubebuilder:printcolumn:JSONPath=".spec.dbServer",name="DB Server",type="string",description=" Name of the DB Server" // +kubebuilder:printcolumn:JSONPath=".spec.dbPort",name="DB Port",type="integer",description="DB server port" -// +kubebuilder:printcolumn:JSONPath=".spec.dbTnsurl",name="TNS STRING",type="string",description=" string of the tnsalias" // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="Replicas",type="integer",description="Replicas" // +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the CDB Resource" // +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any" +// +kubebuilder:printcolumn:JSONPath=".spec.dbTnsurl",name="TNS STRING",type="string",description=" string of the tnsalias" // 
+kubebuilder:resource:path=cdbs,scope=Namespaced +// +kubebuilder:storageversion // CDB is the Schema for the cdbs API type CDB struct { diff --git a/apis/database/v1alpha1/cdb_webhook.go b/apis/database/v4/cdb_webhook.go similarity index 92% rename from apis/database/v1alpha1/cdb_webhook.go rename to apis/database/v4/cdb_webhook.go index 345b6f75..235b2627 100644 --- a/apis/database/v1alpha1/cdb_webhook.go +++ b/apis/database/v4/cdb_webhook.go @@ -36,7 +36,7 @@ ** SOFTWARE. */ -package v1alpha1 +package v4 import ( "reflect" @@ -61,7 +61,7 @@ func (r *CDB) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } -//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-cdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v1alpha1,name=mcdb.kb.io,admissionReviewVersions={v1,v1beta1} +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-cdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v4,name=mcdb.kb.io,admissionReviewVersions={v1,v1beta1} var _ webhook.Defaulter = &CDB{} @@ -79,7 +79,7 @@ func (r *CDB) Default() { } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
-//+kubebuilder:webhook:path=/validate-database-oracle-com-v1alpha1-cdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v1alpha1,name=vcdb.kb.io,admissionReviewVersions={v1,v1beta1}
+//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-cdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v4,name=vcdb.kb.io,admissionReviewVersions={v1,v1beta1}
 
 var _ webhook.Validator = &CDB{}
 
@@ -104,6 +104,11 @@ func (r *CDB) ValidateCreate() (admission.Warnings, error) {
 			field.Required(field.NewPath("spec").Child("cdbTlsCrt"), "Please specify CDB Tls Certificate(secret)"))
 	}
 
+	if reflect.ValueOf(r.Spec.CDBPriKey).IsZero() {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("CDBPriKey"), "Please specify CDB CDBPriKey(secret)"))
+	}
+
 	/*if r.Spec.SCANName == "" {
 		allErrs = append(allErrs,
 			field.Required(field.NewPath("spec").Child("scanName"), "Please specify SCAN Name for CDB"))
diff --git a/apis/database/v4/dataguardbroker_conversion.go b/apis/database/v4/dataguardbroker_conversion.go
new file mode 100644
index 00000000..c63210e0
--- /dev/null
+++ b/apis/database/v4/dataguardbroker_conversion.go
@@ -0,0 +1,4 @@
+package v4
+
+// Hub defines v4 as the hub version
+func (*DataguardBroker) Hub() {}
diff --git a/apis/database/v4/dataguardbroker_types.go b/apis/database/v4/dataguardbroker_types.go
new file mode 100644
index 00000000..cec11ca4
--- /dev/null
+++ b/apis/database/v4/dataguardbroker_types.go
@@ -0,0 +1,163 @@
+/*
+** Copyright (c) 2023 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// DataguardBrokerSpec defines the desired state of DataguardBroker +type DataguardBrokerSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + PrimaryDatabaseRef string `json:"primaryDatabaseRef"` + StandbyDatabaseRefs []string `json:"standbyDatabaseRefs"` + SetAsPrimaryDatabase string `json:"setAsPrimaryDatabase,omitempty"` + LoadBalancer bool `json:"loadBalancer,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + // +kubebuilder:validation:Enum=MaxPerformance;MaxAvailability + ProtectionMode string `json:"protectionMode"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + FastStartFailover bool `json:"fastStartFailover,omitempty"` +} + +// DataguardBrokerStatus defines the observed state of DataguardBroker +type DataguardBrokerStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + PrimaryDatabaseRef string `json:"primaryDatabaseRef,omitempty"` + ProtectionMode string `json:"protectionMode,omitempty"` + PrimaryDatabase string `json:"primaryDatabase,omitempty"` + StandbyDatabases string `json:"standbyDatabases,omitempty"` + ExternalConnectString string `json:"externalConnectString,omitempty"` + ClusterConnectString string `json:"clusterConnectString,omitempty"` + Status string `json:"status,omitempty"` + + FastStartFailover string `json:"fastStartFailover,omitempty"` + DatabasesInDataguardConfig map[string]string `json:"databasesInDataguardConfig,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// 
+kubebuilder:printcolumn:JSONPath=".status.primaryDatabase",name="Primary",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.standbyDatabases",name="Standbys",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.protectionMode",name="Protection Mode",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.clusterConnectString",name="Cluster Connect Str",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.externalConnectString",name="Connect Str",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.primaryDatabaseRef",name="Primary Database",type="string", priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.fastStartFailover",name="FSFO", type="string" + +// DataguardBroker is the Schema for the dataguardbrokers API +// +kubebuilder:storageversion +type DataguardBroker struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DataguardBrokerSpec `json:"spec,omitempty"` + Status DataguardBrokerStatus `json:"status,omitempty"` +} + +// ////////////////////////////////////////////////////////////////////////////////////////////////// +// Returns the current primary database in the dataguard configuration from the resource status/spec +// ////////////////////////////////////////////////////////////////////////////////////////////////// +func (broker *DataguardBroker) GetCurrentPrimaryDatabase() string { + if broker.Status.PrimaryDatabase != "" { + return broker.Status.DatabasesInDataguardConfig[broker.Status.PrimaryDatabase] + } + return broker.Spec.PrimaryDatabaseRef +} + +// ////////////////////////////////////////////////////////////////////////////////////////////////// +// Returns databases in Dataguard configuration from the resource status/spec +// ////////////////////////////////////////////////////////////////////////////////////////////////// +func (broker *DataguardBroker) 
GetDatabasesInDataGuardConfiguration() []string { + var databases []string + if len(broker.Status.DatabasesInDataguardConfig) > 0 { + for _, value := range broker.Status.DatabasesInDataguardConfig { + if value != "" { + databases = append(databases, value) + } + } + + return databases + } + + databases = append(databases, broker.Spec.PrimaryDatabaseRef) + databases = append(databases, broker.Spec.StandbyDatabaseRefs...) + return databases +} + +// ////////////////////////////////////////////////////////////////////////////////////////////////// +// Returns standby databases in the dataguard configuration from the resource status/spec +// ////////////////////////////////////////////////////////////////////////////////////////////////// +func (broker *DataguardBroker) GetStandbyDatabasesInDgConfig() []string { + var databases []string + if len(broker.Status.DatabasesInDataguardConfig) > 0 { + for _, value := range broker.Status.DatabasesInDataguardConfig { + if value != "" && value != broker.Status.PrimaryDatabase { + databases = append(databases, value) + } + } + + return databases + } + + databases = append(databases, broker.Spec.StandbyDatabaseRefs...) + return databases +} + +//+kubebuilder:object:root=true + +// DataguardBrokerList contains a list of DataguardBroker +type DataguardBrokerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataguardBroker `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DataguardBroker{}, &DataguardBrokerList{}) +} diff --git a/apis/database/v4/dataguardbroker_webhook.go b/apis/database/v4/dataguardbroker_webhook.go new file mode 100644 index 00000000..bcd35de9 --- /dev/null +++ b/apis/database/v4/dataguardbroker_webhook.go @@ -0,0 +1,55 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */
+
+package v4
+
+import (
+	ctrl "sigs.k8s.io/controller-runtime"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// log is for logging in this package.
+var dataguardbrokerlog = logf.Log.WithName("dataguardbroker-resource")
+
+func (r *DataguardBroker) SetupWebhookWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewWebhookManagedBy(mgr).
+		For(r).
+		Complete()
+}
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
diff --git a/apis/database/v4/dbcssystem_conversion.go b/apis/database/v4/dbcssystem_conversion.go
new file mode 100644
index 00000000..e5919f54
--- /dev/null
+++ b/apis/database/v4/dbcssystem_conversion.go
@@ -0,0 +1,4 @@
+package v4
+
+// Hub defines v4 as the hub version
+func (*DbcsSystem) Hub() {}
diff --git a/apis/database/v4/dbcssystem_kms_types.go b/apis/database/v4/dbcssystem_kms_types.go
new file mode 100644
index 00000000..8cbff504
--- /dev/null
+++ b/apis/database/v4/dbcssystem_kms_types.go
@@ -0,0 +1,141 @@
+/*
+** Copyright (c) 2022-2024 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ +package v4 + +import "encoding/json" + +type KMSConfig struct { + VaultName string `json:"vaultName,omitempty"` + CompartmentId string `json:"compartmentId,omitempty"` + KeyName string `json:"keyName,omitempty"` + EncryptionAlgo string `json:"encryptionAlgo,omitempty"` + VaultType string `json:"vaultType,omitempty"` +} +type KMSDetailsStatus struct { + VaultId string `json:"vaultId,omitempty"` + ManagementEndpoint string `json:"managementEndpoint,omitempty"` + KeyId string `json:"keyId,omitempty"` + VaultName string `json:"vaultName,omitempty"` + CompartmentId string `json:"compartmentId,omitempty"` + KeyName string `json:"keyName,omitempty"` + EncryptionAlgo string `json:"encryptionAlgo,omitempty"` + VaultType string `json:"vaultType,omitempty"` +} + +const ( + lastSuccessfulKMSConfig = "lastSuccessfulKMSConfig" + lastSuccessfulKMSStatus = "lastSuccessfulKMSStatus" +) + +// GetLastSuccessfulKMSConfig returns the KMS config from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulKMSConfig. +func (dbcs *DbcsSystem) GetLastSuccessfulKMSConfig() (*KMSConfig, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulKMSConfig] + if !ok { + return nil, nil + } + + configBytes := []byte(val) + kmsConfig := KMSConfig{} + + err := json.Unmarshal(configBytes, &kmsConfig) + if err != nil { + return nil, err + } + + return &kmsConfig, nil +} + +// GetLastSuccessfulKMSStatus returns the KMS status from the last successful reconciliation. +// Returns nil, nil if there is no lastSuccessfulKMSStatus. +func (dbcs *DbcsSystem) GetLastSuccessfulKMSStatus() (*KMSDetailsStatus, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulKMSStatus] + if !ok { + return nil, nil + } + + statusBytes := []byte(val) + kmsStatus := KMSDetailsStatus{} + + err := json.Unmarshal(statusBytes, &kmsStatus) + if err != nil { + return nil, err + } + + return &kmsStatus, nil +} + +// SetLastSuccessfulKMSConfig saves the given KMSConfig to the annotations. 
+func (dbcs *DbcsSystem) SetLastSuccessfulKMSConfig(kmsConfig *KMSConfig) error { + configBytes, err := json.Marshal(kmsConfig) + if err != nil { + return err + } + + annotations := dbcs.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[lastSuccessfulKMSConfig] = string(configBytes) + dbcs.SetAnnotations(annotations) + return nil +} + +// SetLastSuccessfulKMSStatus saves the given KMSDetailsStatus to the annotations. +func (dbcs *DbcsSystem) SetLastSuccessfulKMSStatus(kmsStatus *KMSDetailsStatus) error { + statusBytes, err := json.Marshal(kmsStatus) + if err != nil { + return err + } + + annotations := dbcs.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[lastSuccessfulKMSStatus] = string(statusBytes) + dbcs.SetAnnotations(annotations) + // Update KMSDetailsStatus in DbcsSystemStatus + dbcs.Status.KMSDetailsStatus = KMSDetailsStatus{ + VaultName: kmsStatus.VaultName, + CompartmentId: kmsStatus.CompartmentId, + KeyName: kmsStatus.KeyName, + EncryptionAlgo: kmsStatus.EncryptionAlgo, + VaultType: kmsStatus.VaultType, + } + return nil +} diff --git a/apis/database/v4/dbcssystem_pdbconfig_types.go b/apis/database/v4/dbcssystem_pdbconfig_types.go new file mode 100644 index 00000000..2ae361e5 --- /dev/null +++ b/apis/database/v4/dbcssystem_pdbconfig_types.go @@ -0,0 +1,83 @@ +/* +** Copyright (c) 2022-2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ +package v4 + +// PDBConfig defines details of PDB struct for DBCS systems +type PDBConfig struct { + // The name for the pluggable database (PDB). The name is unique in the context of a Database. The name must begin with an alphabetic character and can contain a maximum of thirty alphanumeric characters. Special characters are not permitted. The pluggable database name should not be same as the container database name. + PdbName *string `mandatory:"true" json:"pdbName"` + + // The OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the CDB + // ContainerDatabaseId *string `mandatory:"false" json:"containerDatabaseId"` + + // // A strong password for PDB Admin. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numbers, and two special characters. The special characters must be _, \#, or -. + PdbAdminPassword *string `mandatory:"false" json:"pdbAdminPassword"` + + // // The existing TDE wallet password of the CDB. + TdeWalletPassword *string `mandatory:"false" json:"tdeWalletPassword"` + + // // The locked mode of the pluggable database admin account. If false, the user needs to provide the PDB Admin Password to connect to it. + // // If true, the pluggable database will be locked and user cannot login to it. + ShouldPdbAdminAccountBeLocked *bool `mandatory:"false" json:"shouldPdbAdminAccountBeLocked"` + + // // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. + // // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). + // // Example: `{"Department": "Finance"}` + FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` + + // // Defined tags for this resource. Each key is predefined and scoped to a namespace. + // // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). 
+ // DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"` + + // To specify whether to delete the PDB + IsDelete *bool `mandatory:"false" json:"isDelete,omitempty"` + + // The OCID of the PDB for deletion purposes. + PluggableDatabaseId *string `mandatory:"false" json:"pluggableDatabaseId,omitempty"` +} + +type PDBConfigStatus struct { + PdbName *string `mandatory:"false" json:"pdbName,omitempty"` + ShouldPdbAdminAccountBeLocked *bool `mandatory:"false" json:"shouldPdbAdminAccountBeLocked,omitempty"` + FreeformTags map[string]string `mandatory:"false" json:"freeformTags,omitempty"` + PluggableDatabaseId *string `mandatory:"false" json:"pluggableDatabaseId,omitempty"` + PdbLifecycleState LifecycleState `json:"pdbState,omitempty"` +} +type PDBDetailsStatus struct { + PDBConfigStatus []PDBConfigStatus `json:"pdbConfigStatus,omitempty"` +} diff --git a/apis/database/v4/dbcssystem_types.go b/apis/database/v4/dbcssystem_types.go new file mode 100644 index 00000000..9810a3b7 --- /dev/null +++ b/apis/database/v4/dbcssystem_types.go @@ -0,0 +1,292 @@ +/* +** Copyright (c) 2022-2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ +package v4 + +import ( + "encoding/json" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/go-logr/logr" + dbcsv1 "github.com/oracle/oracle-database-operator/commons/annotations" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// DbcsSystemSpec defines the desired state of DbcsSystem +type DbcsSystemSpec struct { + DbSystem DbSystemDetails `json:"dbSystem,omitempty"` + Id *string `json:"id,omitempty"` + OCIConfigMap *string `json:"ociConfigMap"` + OCISecret *string `json:"ociSecret,omitempty"` + DbClone *DbCloneConfig `json:"dbClone,omitempty"` + HardLink bool `json:"hardLink,omitempty"` + PdbConfigs []PDBConfig `json:"pdbConfigs,omitempty"` + SetupDBCloning bool `json:"setupDBCloning,omitempty"` + DbBackupId *string `json:"dbBackupId,omitempty"` + DatabaseId *string `json:"databaseId,omitempty"` + KMSConfig KMSConfig `json:"kmsConfig,omitempty"` +} + +// DbSystemDetails Spec + +type DbSystemDetails struct { + CompartmentId string `json:"compartmentId"` + AvailabilityDomain string `json:"availabilityDomain"` + SubnetId string `json:"subnetId"` + Shape string `json:"shape"` + SshPublicKeys []string `json:"sshPublicKeys,omitempty"` + HostName string `json:"hostName"` + CpuCoreCount int `json:"cpuCoreCount,omitempty"` + FaultDomains []string `json:"faultDomains,omitempty"` + DisplayName string `json:"displayName,omitempty"` + BackupSubnetId string `json:"backupSubnetId,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + NodeCount *int `json:"nodeCount,omitempty"` + PrivateIp string `json:"privateIp,omitempty"` + Domain string `json:"domain,omitempty"` + InitialDataStorageSizeInGB int `json:"initialDataStorageSizeInGB,omitempty"` + ClusterName string `json:"clusterName,omitempty"` + DbAdminPasswordSecret string `json:"dbAdminPasswordSecret"` + DbName string 
`json:"dbName,omitempty"` + PdbName string `json:"pdbName,omitempty"` + DbDomain string `json:"dbDomain,omitempty"` + DbUniqueName string `json:"dbUniqueName,omitempty"` + StorageManagement string `json:"storageManagement,omitempty"` + DbVersion string `json:"dbVersion,omitempty"` + DbEdition string `json:"dbEdition,omitempty"` + DiskRedundancy string `json:"diskRedundancy,omitempty"` + DbWorkload string `json:"dbWorkload,omitempty"` + LicenseModel string `json:"licenseModel,omitempty"` + TdeWalletPasswordSecret string `json:"tdeWalletPasswordSecret,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + DbBackupConfig Backupconfig `json:"dbBackupConfig,omitempty"` + KMSConfig KMSConfig `json:"kmsConfig,omitempty"` +} + +// DB Backup Config Network Struct +type Backupconfig struct { + AutoBackupEnabled *bool `json:"autoBackupEnabled,omitempty"` + RecoveryWindowsInDays *int `json:"recoveryWindowsInDays,omitempty"` + AutoBackupWindow *string `json:"autoBackupWindow,omitempty"` + BackupDestinationDetails *string `json:"backupDestinationDetails,omitempty"` +} + +// DbcsSystemStatus defines the observed state of DbcsSystem +type DbcsSystemStatus struct { + Id *string `json:"id,omitempty"` + DisplayName string `json:"displayName,omitempty"` + AvailabilityDomain string `json:"availabilityDomain,omitempty"` + SubnetId string `json:"subnetId,omitempty"` + StorageManagement string `json:"storageManagement,omitempty"` + NodeCount int `json:"nodeCount,omitempty"` + CpuCoreCount int `json:"cpuCoreCount,omitempty"` + + DbEdition string `json:"dbEdition,omitempty"` + TimeZone string `json:"timeZone,omitempty"` + DataStoragePercentage *int `json:"dataStoragePercentage,omitempty"` + LicenseModel string `json:"licenseModel,omitempty"` + DataStorageSizeInGBs *int `json:"dataStorageSizeInGBs,omitempty"` + RecoStorageSizeInGB *int `json:"recoStorageSizeInGB,omitempty"` + + Shape *string `json:"shape,omitempty"` + State LifecycleState `json:"state"` + DbInfo []DbStatus 
 `json:"dbInfo,omitempty"`
+ Network VmNetworkDetails `json:"network,omitempty"`
+ WorkRequests []DbWorkrequests `json:"workRequests,omitempty"`
+ KMSDetailsStatus KMSDetailsStatus `json:"kmsDetailsStatus,omitempty"`
+ DbCloneStatus DbCloneStatus `json:"dbCloneStatus,omitempty"`
+ PdbDetailsStatus []PDBDetailsStatus `json:"pdbDetailsStatus,omitempty"`
+}
+
+// DbcsSystemStatus defines the observed state of DbcsSystem
+type DbStatus struct {
+ Id *string `json:"id,omitempty"`
+ DbName string `json:"dbName,omitempty"`
+ DbUniqueName string `json:"dbUniqueName,omitempty"`
+ DbWorkload string `json:"dbWorkload,omitempty"`
+ DbHomeId string `json:"dbHomeId,omitempty"`
+}
+
+type DbWorkrequests struct {
+ OperationType *string `json:"operationType,omitempty"`
+ OperationId *string `json:"operationId,omitempty"`
+ PercentComplete string `json:"percentComplete,omitempty"`
+ TimeAccepted string `json:"timeAccepted,omitempty"`
+ TimeStarted string `json:"timeStarted,omitempty"`
+ TimeFinished string `json:"timeFinished,omitempty"`
+}
+
+type VmNetworkDetails struct {
+ VcnName *string `json:"vcnName,omitempty"`
+ SubnetName *string `json:"clientSubnet,omitempty"`
+ ScanDnsName *string `json:"scanDnsName,omitempty"`
+ HostName string `json:"hostName,omitempty"`
+ DomainName string `json:"domainName,omitempty"`
+ ListenerPort *int `json:"listenerPort,omitempty"`
+ NetworkSG string `json:"networkSG,omitempty"`
+}
+
+// DbCloneConfig defines the configuration for the database clone
+type DbCloneConfig struct {
+ DbAdminPasswordSecret string `json:"dbAdminPasswordSecret,omitempty"`
+ TdeWalletPasswordSecret string `json:"tdeWalletPasswordSecret,omitempty"`
+ DbName string `json:"dbName"`
+ HostName string `json:"hostName"`
+ DbUniqueName string `json:"dbDbUniqueName"`
+ DisplayName string `json:"displayName"`
+ LicenseModel string `json:"licenseModel,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ SshPublicKeys []string `json:"sshPublicKeys,omitempty"`
+ SubnetId string 
 `json:"subnetId"`
+ SidPrefix string `json:"sidPrefix,omitempty"`
+ InitialDataStorageSizeInGB int `json:"initialDataStorageSizeInGB,omitempty"`
+ KmsKeyId string `json:"kmsKeyId,omitempty"`
+ KmsKeyVersionId string `json:"kmsKeyVersionId,omitempty"`
+ PrivateIp string `json:"privateIp,omitempty"`
+}
+
+// DbCloneStatus defines the observed state of DbClone
+type DbCloneStatus struct {
+ Id *string `json:"id,omitempty"`
+ DbAdminPaswordSecret string `json:"dbAdminPaswordSecret,omitempty"`
+ DbName string `json:"dbName,omitempty"`
+ HostName string `json:"hostName"`
+ DbUniqueName string `json:"dbDbUniqueName"`
+ DisplayName string `json:"displayName,omitempty"`
+ LicenseModel string `json:"licenseModel,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ SshPublicKeys []string `json:"sshPublicKeys,omitempty"`
+ SubnetId string `json:"subnetId,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=dbcssystems,scope=Namespaced
+// +kubebuilder:storageversion
+// +kubebuilder:storageversion
+
+// DbcsSystem is the Schema for the dbcssystems API
+type DbcsSystem struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec DbcsSystemSpec `json:"spec,omitempty"`
+ Status DbcsSystemStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// DbcsSystemList contains a list of DbcsSystem
+type DbcsSystemList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []DbcsSystem `json:"items"`
+}
+
+type LifecycleState string
+
+const (
+ Available LifecycleState = "AVAILABLE"
+ Failed LifecycleState = "FAILED"
+ Update LifecycleState = "UPDATING"
+ Provision LifecycleState = "PROVISIONING"
+ Terminate LifecycleState = "TERMINATED"
+)
+
+const lastSuccessfulSpec = "lastSuccessfulSpec"
+
+// GetLastSuccessfulSpec returns spec from the last successful reconciliation. 
+// Returns nil, nil if there is no lastSuccessfulSpec. +func (dbcs *DbcsSystem) GetLastSuccessfulSpec() (*DbcsSystemSpec, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulSpec] + if !ok { + return nil, nil + } + + specBytes := []byte(val) + sucSpec := DbcsSystemSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + return nil, err + } + + return &sucSpec, nil +} +func (dbcs *DbcsSystem) GetLastSuccessfulSpecWithLog(log logr.Logger) (*DbcsSystemSpec, error) { + val, ok := dbcs.GetAnnotations()[lastSuccessfulSpec] + if !ok { + log.Info("No last successful spec annotation found") + return nil, nil + } + + specBytes := []byte(val) + sucSpec := DbcsSystemSpec{} + + err := json.Unmarshal(specBytes, &sucSpec) + if err != nil { + log.Error(err, "Failed to unmarshal last successful spec") + return nil, err + } + + log.Info("Successfully retrieved last successful spec", "spec", sucSpec) + return &sucSpec, nil +} + +// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. +func (dbcs *DbcsSystem) UpdateLastSuccessfulSpec(kubeClient client.Client) error { + specBytes, err := json.Marshal(dbcs.Spec) + if err != nil { + return err + } + + anns := map[string]string{ + lastSuccessfulSpec: string(specBytes), + } + + // return dbcsv1.SetAnnotations(kubeClient, dbcs, anns) + return dbcsv1.PatchAnnotations(kubeClient, dbcs, anns) + +} + +func init() { + SchemeBuilder.Register(&DbcsSystem{}, &DbcsSystemList{}) +} diff --git a/apis/database/v4/dbcssystem_webhook.go b/apis/database/v4/dbcssystem_webhook.go new file mode 100644 index 00000000..c3ff8ddb --- /dev/null +++ b/apis/database/v4/dbcssystem_webhook.go @@ -0,0 +1,98 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var dbcssystemlog = logf.Log.WithName("dbcssystem-resource") + +func (r *DbcsSystem) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-dbcssystem,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=dbcssystems,verbs=create;update,versions=v4,name=mdbcssystemv4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DbcsSystem{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DbcsSystem) Default() { + dbcssystemlog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. + +// +kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v4-dbcssystem,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=dbcssystems,versions=v4,name=vdbcssystemv4.kb.io,admissionReviewVersions=v1 +var _ webhook.Validator = &DbcsSystem{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateCreate() (admission.Warnings, error) { + dbcssystemlog.Info("validate create", "name", r.Name) + + // // TODO(user): fill in your validation logic upon object creation. 
+ return nil, nil +} + +// // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + dbcssystemlog.Info("validate update", "name", r.Name) + + // // TODO(user): fill in your validation logic upon object update. + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DbcsSystem) ValidateDelete() (admission.Warnings, error) { + dbcssystemlog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/groupversion_info.go b/apis/database/v4/groupversion_info.go new file mode 100644 index 00000000..6644b93c --- /dev/null +++ b/apis/database/v4/groupversion_info.go @@ -0,0 +1,58 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the 
+** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Package v4 contains API Schema definitions for the database v4 API group +// +kubebuilder:object:generate=true +// +groupName=database.oracle.com +package v4 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "database.oracle.com", Version: "v4"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/database/v4/lrest_types.go b/apis/database/v4/lrest_types.go new file mode 100644 index 00000000..421a3ea1 --- /dev/null +++ b/apis/database/v4/lrest_types.go @@ -0,0 +1,191 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LRESTSpec defines the desired state of LREST +type LRESTSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Name of the LREST + LRESTName string `json:"cdbName,omitempty"` + // Name of the LREST Service + ServiceName string `json:"serviceName,omitempty"` + + // Password for the LREST System Administrator + SysAdminPwd LRESTSysAdminPassword `json:"sysAdminPwd,omitempty"` + // User in the root container with sysdba priviledges to manage PDB lifecycle + LRESTAdminUser LRESTAdminUser `json:"cdbAdminUser,omitempty"` + // Password for the LREST Administrator to manage PDB lifecycle + LRESTAdminPwd LRESTAdminPassword `json:"cdbAdminPwd,omitempty"` + + LRESTTlsKey LRESTTLSKEY `json:"cdbTlsKey,omitempty"` + LRESTTlsCrt LRESTTLSCRT `json:"cdbTlsCrt,omitempty"` + LRESTPubKey LRESTPUBKEY `json:"cdbPubKey,omitempty"` + LRESTPriKey LRESTPRVKEY `json:"cdbPrvKey,omitempty"` + + // Password for user LREST_PUBLIC_USER + LRESTPwd LRESTPassword `json:"lrestPwd,omitempty"` + // LREST server port. For now, keep it as 8888. TO BE USED IN FUTURE RELEASE. + LRESTPort int `json:"lrestPort,omitempty"` + // LREST Image Name + LRESTImage string `json:"lrestImage,omitempty"` + // The name of the image pull secret in case of a private docker repository. 
+ LRESTImagePullSecret string `json:"lrestImagePullSecret,omitempty"` + // LREST Image Pull Policy + // +kubebuilder:validation:Enum=Always;Never + LRESTImagePullPolicy string `json:"lrestImagePullPolicy,omitempty"` + // Number of LREST Containers to create + Replicas int `json:"replicas,omitempty"` + // Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints + WebLrestServerUser WebLrestServerUser `json:"webServerUser,omitempty"` + // Password for the Web Server User + WebLrestServerPwd WebLrestServerPassword `json:"webServerPwd,omitempty"` + // Name of the DB server + DBServer string `json:"dbServer,omitempty"` + // DB server port + DBPort int `json:"dbPort,omitempty"` + // Node Selector for running the Pod + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + DBTnsurl string `json:"dbTnsurl,omitempty"` + DeletePDBCascade bool `json:"deletePdbCascade,omitempty"` +} + +// LRESTSecret defines the secretName +type LRESTSecret struct { + SecretName string `json:"secretName"` + Key string `json:"key"` +} + +// LRESTSysAdminPassword defines the secret containing SysAdmin Password mapped to key 'sysAdminPwd' for LREST +type LRESTSysAdminPassword struct { + Secret LRESTSecret `json:"secret"` +} + +// LRESTAdminUser defines the secret containing LREST Administrator User mapped to key 'lrestAdminUser' to manage PDB lifecycle +type LRESTAdminUser struct { + Secret LRESTSecret `json:"secret"` +} + +// LRESTAdminPassword defines the secret containing LREST Administrator Password mapped to key 'lrestAdminPwd' to manage PDB lifecycle +type LRESTAdminPassword struct { + Secret LRESTSecret `json:"secret"` +} + +// LRESTPassword defines the secret containing LREST_PUBLIC_USER Password mapped to key 'ordsPwd' +type LRESTPassword struct { + Secret LRESTSecret `json:"secret"` +} + +// WebLrestServerUser defines the secret containing Web Server User mapped to key 'webServerUser' to manage PDB lifecycle +type 
WebLrestServerUser struct { + Secret LRESTSecret `json:"secret"` +} + +// WebLrestServerPassword defines the secret containing password for Web Server User mapped to key 'webServerPwd' to manage PDB lifecycle +type WebLrestServerPassword struct { + Secret LRESTSecret `json:"secret"` +} + +type LRESTTLSKEY struct { + Secret LRESTSecret `json:"secret"` +} + +type LRESTTLSCRT struct { + Secret LRESTSecret `json:"secret"` +} + +type LRESTPUBKEY struct { + Secret LRESTSecret `json:"secret"` +} + +type LRESTPRVKEY struct { + Secret LRESTSecret `json:"secret"` +} + +// LRESTStatus defines the observed state of LREST +type LRESTStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Phase of the LREST Resource + Phase string `json:"phase"` + // LREST Resource Status + Status bool `json:"status"` + // Message + Msg string `json:"msg,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB NAME",type="string",description="Name of the LREST" +// +kubebuilder:printcolumn:JSONPath=".spec.dbServer",name="DB Server",type="string",description=" Name of the DB Server" +// +kubebuilder:printcolumn:JSONPath=".spec.dbPort",name="DB Port",type="integer",description="DB server port" +// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="Replicas",type="integer",description="Replicas" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the LREST Resource" +// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message if any" +// +kubebuilder:printcolumn:JSONPath=".spec.dbTnsurl",name="TNS STRING",type="string",description="string of the tnsalias" +// +kubebuilder:resource:path=lrests,scope=Namespaced +// +kubebuilder:storageversion + +// LREST is the Schema for the lrests API +type 
LREST struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LRESTSpec `json:"spec,omitempty"` + Status LRESTStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// LRESTList contains a list of LREST +type LRESTList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LREST `json:"items"` +} + +func init() { + SchemeBuilder.Register(&LREST{}, &LRESTList{}) +} diff --git a/apis/database/v4/lrest_webhook.go b/apis/database/v4/lrest_webhook.go new file mode 100644 index 00000000..9d65a1d6 --- /dev/null +++ b/apis/database/v4/lrest_webhook.go @@ -0,0 +1,219 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + "reflect" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var lrestlog = logf.Log.WithName("lrest-webhook") + +func (r *LREST) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete()
+}
+
+//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-lrest,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=lrests,verbs=create;update,versions=v4,name=mlrest.kb.io,admissionReviewVersions={v1,v1beta1}
+
+var _ webhook.Defaulter = &LREST{}
+
+// Default implements webhook.Defaulter so a webhook will be registered for the type
+func (r *LREST) Default() {
+ lrestlog.Info("Setting default values in LREST spec for : " + r.Name)
+
+ if r.Spec.LRESTPort == 0 {
+ r.Spec.LRESTPort = 8888
+ }
+
+ if r.Spec.Replicas == 0 {
+ r.Spec.Replicas = 1
+ }
+}
+
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-lrest,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=lrests,verbs=create;update,versions=v4,name=vlrest.kb.io,admissionReviewVersions={v1,v1beta1}
+
+var _ webhook.Validator = &LREST{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (r *LREST) ValidateCreate() (admission.Warnings, error) {
+ lrestlog.Info("ValidateCreate", "name", r.Name)
+
+ var allErrs field.ErrorList
+
+ if r.Spec.ServiceName == "" && r.Spec.DBServer != "" {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("serviceName"), "Please specify LREST Service name"))
+ }
+
+ if reflect.ValueOf(r.Spec.LRESTTlsKey).IsZero() {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("lrestTlsKey"), "Please specify LREST Tls key(secret)"))
+ }
+
+ if reflect.ValueOf(r.Spec.LRESTTlsCrt).IsZero() {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("lrestTlsCrt"), "Please specify LREST Tls Certificate(secret)"))
+ }
+
+ /*if r.Spec.SCANName == "" {
+ allErrs = append(allErrs,
+ field.Required(field.NewPath("spec").Child("scanName"), "Please specify SCAN Name for LREST"))
+ }*/
+
+ if 
(r.Spec.DBServer == "" && r.Spec.DBTnsurl == "") || (r.Spec.DBServer != "" && r.Spec.DBTnsurl != "") {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("dbServer"), "Please specify Database Server Name/IP Address or tnsalias string"))
+	}
+
+	if r.Spec.DBTnsurl != "" && (r.Spec.DBServer != "" || r.Spec.DBPort != 0 || r.Spec.ServiceName != "") {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("dbServer"), "DBtnsurl is orthogonal to (DBServer,DBport,Services)"))
+	}
+
+	if r.Spec.DBPort == 0 && r.Spec.DBServer != "" {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("dbPort"), "Please specify DB Server Port"))
+	}
+	if r.Spec.DBPort < 0 && r.Spec.DBServer != "" {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port"))
+	}
+	if r.Spec.LRESTPort < 0 {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("ordsPort"), "Please specify a valid LREST Port"))
+	}
+	if r.Spec.Replicas < 0 {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas"))
+	}
+	if r.Spec.LRESTImage == "" {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("ordsImage"), "Please specify name of LREST Image to be used"))
+	}
+	if reflect.ValueOf(r.Spec.LRESTAdminUser).IsZero() {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("lrestAdminUser"), "Please specify user in the root container with sysdba privileges to manage PDB lifecycle"))
+	}
+	if reflect.ValueOf(r.Spec.LRESTAdminPwd).IsZero() {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("lrestAdminPwd"), "Please specify password for the LREST Administrator to manage PDB lifecycle"))
+	}
+	/* if reflect.ValueOf(r.Spec.LRESTPwd).IsZero() {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("ordsPwd"), "Please specify password 
for user LREST_PUBLIC_USER"))
+	} */
+	if reflect.ValueOf(r.Spec.WebLrestServerUser).IsZero() {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("webLrestServerUser"), "Please specify the Web Server User having SQL Administrator role"))
+	}
+	if reflect.ValueOf(r.Spec.WebLrestServerPwd).IsZero() {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("webServerPwd"), "Please specify password for the Web Server User having SQL Administrator role"))
+	}
+	if len(allErrs) == 0 {
+		return nil, nil
+	}
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "database.oracle.com", Kind: "LREST"},
+		r.Name, allErrs)
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (r *LREST) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+	lrestlog.Info("validate update", "name", r.Name)
+
+	isLRESTMarkedToBeDeleted := r.GetDeletionTimestamp() != nil
+	if isLRESTMarkedToBeDeleted {
+		return nil, nil
+	}
+
+	var allErrs field.ErrorList
+
+	// Check for updation errors
+	oldLREST, ok := old.(*LREST)
+	if !ok {
+		return nil, nil
+	}
+
+	if r.Spec.DBPort < 0 {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port"))
+	}
+	if r.Spec.LRESTPort < 0 {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("ordsPort"), "Please specify a valid LREST Port"))
+	}
+	if r.Spec.Replicas < 0 {
+		allErrs = append(allErrs,
+			field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas"))
+	}
+	if !strings.EqualFold(oldLREST.Spec.ServiceName, r.Spec.ServiceName) {
+		allErrs = append(allErrs,
+			field.Forbidden(field.NewPath("spec").Child("serviceName"), "cannot be changed"))
+	}
+
+	if len(allErrs) == 0 {
+		return nil, nil
+	}
+
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "database.oracle.com", Kind: "LREST"},
+		r.Name, allErrs)
+}
+
+// 
ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *LREST) ValidateDelete() (admission.Warnings, error) { + lrestlog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v4/lrpdb_types.go b/apis/database/v4/lrpdb_types.go new file mode 100644 index 00000000..d37bebdc --- /dev/null +++ b/apis/database/v4/lrpdb_types.go @@ -0,0 +1,256 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LRPDBSpec defines the desired state of LRPDB +type LRPDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + LRPDBTlsKey LRPDBTLSKEY `json:"lrpdbTlsKey,omitempty"` + LRPDBTlsCrt LRPDBTLSCRT `json:"lrpdbTlsCrt,omitempty"` + LRPDBTlsCat LRPDBTLSCAT `json:"lrpdbTlsCat,omitempty"` + LRPDBPriKey LRPDBPRVKEY `json:"cdbPrvKey,omitempty"` + + // Namespace of the rest server + CDBNamespace string `json:"cdbNamespace,omitempty"` + // Name of the CDB Custom Resource that runs the LREST container + CDBResName string `json:"cdbResName,omitempty"` + // Name of the CDB + CDBName string `json:"cdbName,omitempty"` + // The name of the new LRPDB. Relevant for both Create and Plug Actions. + LRPDBName string `json:"pdbName,omitempty"` + // Name of the Source LRPDB from which to clone + SrcLRPDBName string `json:"srcPdbName,omitempty"` + // The administrator username for the new LRPDB. This property is required when the Action property is Create. + AdminName LRPDBAdminName `json:"adminName,omitempty"` + // The administrator password for the new LRPDB. This property is required when the Action property is Create. + AdminPwd LRPDBAdminPassword `json:"adminPwd,omitempty"` + // Relevant for Create and Plug operations. As defined in the Oracle Multitenant Database documentation. 
Values can be a filename convert pattern or NONE. + AdminpdbUser AdminpdbUser `json:"adminpdbUser,omitempty"` + AdminpdbPass AdminpdbPass `json:"adminpdbPass,omitempty"` + + FileNameConversions string `json:"fileNameConversions,omitempty"` + // This property is required when the Action property is Plug. As defined in the Oracle Multitenant Database documentation. Values can be a source filename convert pattern or NONE. + SourceFileNameConversions string `json:"sourceFileNameConversions,omitempty"` + // XML metadata filename to be used for Plug or Unplug operations + XMLFileName string `json:"xmlFileName,omitempty"` + // To copy files or not while cloning a LRPDB + // +kubebuilder:validation:Enum=COPY;NOCOPY;MOVE + CopyAction string `json:"copyAction,omitempty"` + // Specify if datafiles should be removed or not. The value can be INCLUDING or KEEP (default). + // +kubebuilder:validation:Enum=INCLUDING;KEEP + DropAction string `json:"dropAction,omitempty"` + // A Path specified for sparse clone snapshot copy. (Optional) + SparseClonePath string `json:"sparseClonePath,omitempty"` + // Whether to reuse temp file + ReuseTempFile *bool `json:"reuseTempFile,omitempty"` + // Relevant for Create and Plug operations. True for unlimited storage. Even when set to true, totalSize and tempSize MUST be specified in the request if Action is Create. + UnlimitedStorage *bool `json:"unlimitedStorage,omitempty"` + // Indicate if 'AS CLONE' option should be used in the command to plug in a LRPDB. This property is applicable when the Action property is PLUG but not required. + AsClone *bool `json:"asClone,omitempty"` + // Relevant for create and plug operations. Total size as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. + TotalSize string `json:"totalSize,omitempty"` + // Relevant for Create and Clone operations. 
Total size for temporary tablespace as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation.
+	TempSize string `json:"tempSize,omitempty"`
+	// Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints
+	WebLrpdbServerUser WebLrpdbServerUser `json:"webServerUser,omitempty"`
+	// Password for the Web Server User
+	WebLrpdbServerPwd WebLrpdbServerPassword `json:"webServerPwd,omitempty"`
+	// TDE import for plug operations
+	LTDEImport *bool `json:"tdeImport,omitempty"`
+	// LTDE export for unplug operations
+	LTDEExport *bool `json:"tdeExport,omitempty"`
+	// TDE password if the tdeImport or tdeExport flag is set to true. Can be used in create, plug or unplug operations
+	LTDEPassword LTDEPwd `json:"tdePassword,omitempty"`
+	// LTDE keystore path is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations.
+	LTDEKeystorePath string `json:"tdeKeystorePath,omitempty"`
+	// LTDE secret is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations.
+	LTDESecret LTDESecret `json:"tdeSecret,omitempty"`
+	// Whether you need the script only or execute the script
+	GetScript *bool `json:"getScript,omitempty"`
+	// Action to be taken: Create/Clone/Plug/Unplug/Delete/Modify/Status/Map/Alter. Map is used to map a Database LRPDB to a Kubernetes LRPDB CR. 
+ // +kubebuilder:validation:Enum=Create;Clone;Plug;Unplug;Delete;Modify;Status;Map;Alter;Noaction + Action string `json:"action"` + // Extra options for opening and closing a LRPDB + // +kubebuilder:validation:Enum=IMMEDIATE;NORMAL;READ ONLY;READ WRITE;RESTRICTED + ModifyOption string `json:"modifyOption,omitempty"` + // to be used with ALTER option - obsolete do not use + AlterSystem string `json:"alterSystem,omitempty"` + // to be used with ALTER option - the name of the parameter + AlterSystemParameter string `json:"alterSystemParameter"` + // to be used with ALTER option - the value of the parameter + AlterSystemValue string `json:"alterSystemValue"` + // parameter scope + ParameterScope string `json:"parameterScope,omitempty"` + // The target state of the LRPDB + // +kubebuilder:validation:Enum=OPEN;CLOSE;ALTER + LRPDBState string `json:"pdbState,omitempty"` + // turn on the assertive approach to delete pdb resource + // kubectl delete pdb ..... automatically triggers the pluggable database + // deletion + AssertiveLrpdbDeletion bool `json:"assertiveLrpdbDeletion,omitempty"` + PDBConfigMap string `json:"pdbconfigmap,omitempty"` +} + +// LRPDBAdminName defines the secret containing Sys Admin User mapped to key 'adminName' for LRPDB +type LRPDBAdminName struct { + Secret LRPDBSecret `json:"secret"` +} + +// LRPDBAdminPassword defines the secret containing Sys Admin Password mapped to key 'adminPwd' for LRPDB +type LRPDBAdminPassword struct { + Secret LRPDBSecret `json:"secret"` +} + +// TDEPwd defines the secret containing TDE Wallet Password mapped to key 'tdePassword' for LRPDB +type LTDEPwd struct { + Secret LRPDBSecret `json:"secret"` +} + +// TDESecret defines the secret containing TDE Secret to key 'tdeSecret' for LRPDB +type LTDESecret struct { + Secret LRPDBSecret `json:"secret"` +} + +type WebLrpdbServerUser struct { + Secret LRPDBSecret `json:"secret"` +} + +type WebLrpdbServerPassword struct { + Secret LRPDBSecret `json:"secret"` +} + +type 
AdminpdbUser struct { + Secret LRPDBSecret `json:"secret"` +} + +type AdminpdbPass struct { + Secret LRPDBSecret `json:"secret"` +} + +// LRPDBSecret defines the secretName +type LRPDBSecret struct { + SecretName string `json:"secretName"` + Key string `json:"key"` +} + +type LRPDBTLSKEY struct { + Secret LRPDBSecret `json:"secret"` +} + +type LRPDBTLSCRT struct { + Secret LRPDBSecret `json:"secret"` +} + +type LRPDBTLSCAT struct { + Secret LRPDBSecret `json:"secret"` +} + +type LRPDBPRVKEY struct { + Secret LRPDBSecret `json:"secret"` +} + +// LRPDBStatus defines the observed state of LRPDB +type LRPDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // LRPDB Connect String + ConnString string `json:"connString,omitempty"` + // Phase of the LRPDB Resource + Phase string `json:"phase"` + // LRPDB Resource Status + Status bool `json:"status"` + // Total size of the LRPDB + TotalSize string `json:"totalSize,omitempty"` + // Open mode of the LRPDB + OpenMode string `json:"openMode,omitempty"` + // Modify Option of the LRPDB + ModifyOption string `json:"modifyOption,omitempty"` + // Message + Msg string `json:"msg,omitempty"` + // Last Completed Action + Action string `json:"action,omitempty"` + // Last Completed alter system + AlterSystem string `json:"alterSystem,omitempty"` + // Last ORA- + SqlCode int `json:"sqlCode"` + Bitstat int `json:"bitstat,omitempty"` /* Bitmask */ + BitStatStr string `json:"bitstatstr,omitempty"` /* Decoded bitmask */ +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB" +// +kubebuilder:printcolumn:JSONPath=".spec.pdbName",name="PDB Name",type="string",description="Name of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.openMode",name="PDB State",type="string",description="PDB Open Mode" 
+// +kubebuilder:printcolumn:JSONPath=".status.totalSize",name="PDB Size",type="string",description="Total Size of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the LRPDB Resource" +// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any" +// +kubebuilder:printcolumn:JSONPath=".status.sqlCode",name="last sqlcode",type="integer",description="last sqlcode" +// +kubebuilder:printcolumn:JSONPath=".status.connString",name="Connect_String",type="string",description="The connect string to be used" +// +kubebuilder:resource:path=lrpdbs,scope=Namespaced +// +kubebuilder:storageversion + +// LRPDB is the Schema for the pdbs API +type LRPDB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LRPDBSpec `json:"spec,omitempty"` + Status LRPDBStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// LRPDBList contains a list of LRPDB +type LRPDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LRPDB `json:"items"` +} + +func init() { + SchemeBuilder.Register(&LRPDB{}, &LRPDBList{}) +} diff --git a/apis/database/v4/lrpdb_webhook.go b/apis/database/v4/lrpdb_webhook.go new file mode 100644 index 00000000..d6807926 --- /dev/null +++ b/apis/database/v4/lrpdb_webhook.go @@ -0,0 +1,370 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +/* MODIFIED (MM/DD/YY) +** rcitton 07/14/22 - 33822886 + */ + +package v4 + +import ( + "context" + "fmt" + "reflect" + "strconv" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var lrpdblog = logf.Log.WithName("lrpdb-webhook") + +func (r *LRPDB) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + WithValidator(&LRPDB{}). + WithDefaulter(&LRPDB{}). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-lrpdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=lrpdbs,verbs=create;update,versions=v4,name=mlrpdb.kb.io,admissionReviewVersions={v4,v1beta1} + +var _ webhook.CustomDefaulter = &LRPDB{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *LRPDB) Default(ctx context.Context, obj runtime.Object) error { + pdb, ok := obj.(*LRPDB) + if !ok { + return fmt.Errorf("expected an LRPDB object but got %T", obj) + } + lrpdblog.Info("Setting default values in LRPDB spec for : " + pdb.Name) + + action := strings.ToUpper(pdb.Spec.Action) + + if action == "DELETE" { + if pdb.Spec.DropAction == "" { + pdb.Spec.DropAction = "KEEP" + lrpdblog.Info(" - dropAction : KEEP") + } + } else if action != "MODIFY" && action != "STATUS" { + if pdb.Spec.ReuseTempFile == nil { + pdb.Spec.ReuseTempFile = new(bool) + *pdb.Spec.ReuseTempFile = true + lrpdblog.Info(" - reuseTempFile : " + strconv.FormatBool(*(pdb.Spec.ReuseTempFile))) + } + if pdb.Spec.UnlimitedStorage == nil { + pdb.Spec.UnlimitedStorage = new(bool) + *pdb.Spec.UnlimitedStorage = true + 
lrpdblog.Info(" - unlimitedStorage : " + strconv.FormatBool(*(pdb.Spec.UnlimitedStorage))) + } + if pdb.Spec.LTDEImport == nil { + pdb.Spec.LTDEImport = new(bool) + *pdb.Spec.LTDEImport = false + lrpdblog.Info(" - tdeImport : " + strconv.FormatBool(*(pdb.Spec.LTDEImport))) + } + if pdb.Spec.LTDEExport == nil { + pdb.Spec.LTDEExport = new(bool) + *pdb.Spec.LTDEExport = false + lrpdblog.Info(" - tdeExport : " + strconv.FormatBool(*(pdb.Spec.LTDEExport))) + } + if pdb.Spec.AsClone == nil { + pdb.Spec.AsClone = new(bool) + *pdb.Spec.AsClone = false + lrpdblog.Info(" - asClone : " + strconv.FormatBool(*(pdb.Spec.AsClone))) + } + } + + if pdb.Spec.GetScript == nil { + pdb.Spec.GetScript = new(bool) + *pdb.Spec.GetScript = false + lrpdblog.Info(" - getScript : " + strconv.FormatBool(*(pdb.Spec.GetScript))) + } + return nil +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-lrpdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=lrpdbs,verbs=create;update,versions=v4,name=vlrpdb.kb.io,admissionReviewVersions={v4,v1beta1} + +var _ webhook.CustomValidator = &LRPDB{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *LRPDB) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + lrpdblog.Info("ValidateCreate-Validating LRPDB spec for : " + r.Name) + pdb := obj.(*LRPDB) + + var allErrs field.ErrorList + + r.validateCommon(&allErrs, ctx, *pdb) + + r.validateAction(&allErrs, ctx, *pdb) + + action := strings.ToUpper(pdb.Spec.Action) + + if len(allErrs) == 0 { + lrpdblog.Info("LRPDB Resource : " + r.Name + " successfully validated for Action : " + action) + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "LRPDB"}, + r.Name, allErrs) + return nil, nil +} + +// Validate Action for 
required parameters
+func (r *LRPDB) validateAction(allErrs *field.ErrorList, ctx context.Context, pdb LRPDB) {
+	action := strings.ToUpper(pdb.Spec.Action)
+
+	lrpdblog.Info("Validating LRPDB Resource Action : " + action)
+
+	if reflect.ValueOf(pdb.Spec.LRPDBTlsKey).IsZero() {
+		*allErrs = append(*allErrs,
+			field.Required(field.NewPath("spec").Child("lrpdbTlsKey"), "Please specify LRPDB Tls Key(secret)"))
+	}
+
+	if reflect.ValueOf(pdb.Spec.LRPDBTlsCrt).IsZero() {
+		*allErrs = append(*allErrs,
+			field.Required(field.NewPath("spec").Child("lrpdbTlsCrt"), "Please specify LRPDB Tls Certificate(secret)"))
+	}
+
+	if reflect.ValueOf(pdb.Spec.LRPDBTlsCat).IsZero() {
+		*allErrs = append(*allErrs,
+			field.Required(field.NewPath("spec").Child("lrpdbTlsCat"), "Please specify LRPDB Tls Certificate Authority(secret)"))
+	}
+
+	switch action {
+	case "DELETE":
+		/* BUG 36752336 - LREST OPERATOR - DELETE NON-EXISTENT PDB SHOWS LRPDB CREATED MESSAGE */
+		if pdb.Status.OpenMode == "READ WRITE" {
+			lrpdblog.Info("Cannot delete: pdb is open ")
+			*allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+pdb.Spec.LRPDBName+" "+pdb.Status.OpenMode))
+		}
+		r.CheckObjExistence("DELETE", allErrs, ctx, pdb)
+	case "CREATE":
+		if reflect.ValueOf(pdb.Spec.AdminpdbUser).IsZero() {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("adminpdbUser"), "Please specify LRPDB System Administrator user"))
+		}
+		if reflect.ValueOf(pdb.Spec.AdminpdbPass).IsZero() {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("adminpdbPass"), "Please specify LRPDB System Administrator Password"))
+		}
+		if pdb.Spec.FileNameConversions == "" {
+			*allErrs = append(*allErrs,
+				field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. 
Values can be a filename convert pattern or NONE")) + } + if pdb.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if pdb.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + if *(pdb.Spec.LTDEImport) { + r.validateTDEInfo(allErrs, ctx, pdb) + } + case "CLONE": + // Sample Err: The LRPDB "lrpdb1-clone" is invalid: spec.srcPdbName: Required value: Please specify source LRPDB for Cloning + if pdb.Spec.SrcLRPDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("srcPdbName"), "Please specify source LRPDB name for Cloning")) + } + if pdb.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if pdb.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + if pdb.Status.OpenMode == "MOUNT" { + lrpdblog.Info("Cannot clone: pdb is mount ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+pdb.Spec.LRPDBName+" "+pdb.Status.OpenMode)) + } + case "PLUG": + if pdb.Spec.XMLFileName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename")) + } + if pdb.Spec.FileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. 
Values can be a filename convert pattern or NONE")) + } + if pdb.Spec.SourceFileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("sourceFileNameConversions"), "Please specify a value for sourceFileNameConversions. Values can be a filename convert pattern or NONE")) + } + if pdb.Spec.CopyAction == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("copyAction"), "Please specify a value for copyAction. Values can be COPY, NOCOPY or MOVE")) + } + if *(pdb.Spec.LTDEImport) { + r.validateTDEInfo(allErrs, ctx, pdb) + } + case "UNPLUG": + if pdb.Spec.XMLFileName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename")) + } + if *(pdb.Spec.LTDEExport) { + r.validateTDEInfo(allErrs, ctx, pdb) + } + if pdb.Status.OpenMode == "READ WRITE" { + lrpdblog.Info("Cannot unplug: pdb is open ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+pdb.Spec.LRPDBName+" "+pdb.Status.OpenMode)) + } + r.CheckObjExistence("UNPLUG", allErrs, ctx, pdb) + case "MODIFY": + + if pdb.Spec.LRPDBState == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("lrpdbState"), "Please specify target state of LRPDB")) + } + if pdb.Spec.ModifyOption == "" && pdb.Spec.AlterSystem == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("modifyOption"), "Please specify an option for opening/closing a LRPDB or alter system parameter")) + } + r.CheckObjExistence("MODIFY", allErrs, ctx, pdb) + } +} + +func (r *LRPDB) CheckObjExistence(action string, allErrs *field.ErrorList, ctx context.Context, pdb LRPDB) { + /* BUG 36752465 - lrest operator - open non-existent pdb creates a lrpdb with status failed */ + lrpdblog.Info("Action [" + action + "] checkin " + pdb.Spec.LRPDBName + " existence") + if pdb.Status.OpenMode == "" { + *allErrs = 
append(*allErrs, field.NotFound(field.NewPath("Spec").Child("LRPDBName"), " "+pdb.Spec.LRPDBName+" does not exist : action "+action+" failure")) + + } +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *LRPDB) ValidateUpdate(ctx context.Context, obj runtime.Object, old runtime.Object) (admission.Warnings, error) { + lrpdblog.Info("ValidateUpdate-Validating LRPDB spec for : " + r.Name) + pdb := old.(*LRPDB) + + isLRPDBMarkedToBeDeleted := r.GetDeletionTimestamp() != nil + if isLRPDBMarkedToBeDeleted { + return nil, nil + } + + var allErrs field.ErrorList + action := strings.ToUpper(pdb.Spec.Action) + + // If LRPDB CR has been created and in Ready state, only allow updates if the "action" value has changed as well + if (pdb.Status.Phase == "Ready") && (pdb.Status.Action != "MODIFY") && (pdb.Status.Action != "STATUS") && (pdb.Status.Action != "NOACTION") && (pdb.Status.Action == action) { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("action"), "New action also needs to be specified after LRPDB is in Ready state")) + } else { + + // Check Common Validations + r.validateCommon(&allErrs, ctx, *pdb) + + // Validate required parameters for Action specified + r.validateAction(&allErrs, ctx, *pdb) + + // Check TDE requirements + if (action != "DELETE") && (action != "MODIFY") && (action != "STATUS") && (*(pdb.Spec.LTDEImport) || *(pdb.Spec.LTDEExport)) { + r.validateTDEInfo(&allErrs, ctx, *pdb) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "LRPDB"}, + r.Name, allErrs) + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *LRPDB) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + lrpdblog.Info("ValidateDelete-Validating LRPDB spec for : " + r.Name) + + // TODO(user): fill in your 
validation logic upon object deletion. + return nil, nil +} + +// Validate common specs needed for all LRPDB Actions +func (r *LRPDB) validateCommon(allErrs *field.ErrorList, ctx context.Context, pdb LRPDB) { + lrpdblog.Info("validateCommon", "name", pdb.Name) + + if pdb.Spec.Action == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("action"), "Please specify LRPDB operation to be performed")) + } + if pdb.Spec.CDBResName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("cdbResName"), "Please specify the name of the CDB Kubernetes resource to use for LRPDB operations")) + } + if pdb.Spec.CDBNamespace == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("cdbNamespace"), "Please specify the namespace of the rest server to use for LRPDB operations")) + } + if pdb.Spec.LRPDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("lrpdbName"), "Please specify name of the LRPDB to be created")) + } +} + +// Validate TDE information for Create, Plug and Unplug Actions +func (r *LRPDB) validateTDEInfo(allErrs *field.ErrorList, ctx context.Context, pdb LRPDB) { + lrpdblog.Info("validateTDEInfo", "name", r.Name) + + if reflect.ValueOf(pdb.Spec.LTDEPassword).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdePassword"), "Please specify a value for tdePassword.")) + } + if pdb.Spec.LTDEKeystorePath == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeKeystorePath"), "Please specify a value for tdeKeystorePath.")) + } + if reflect.ValueOf(pdb.Spec.LTDESecret).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeSecret"), "Please specify a value for tdeSecret.")) + } + +} diff --git a/apis/database/v4/oraclerestdataservice_conversion.go b/apis/database/v4/oraclerestdataservice_conversion.go new file mode 100644 index 00000000..a19cdfd5 --- 
/dev/null +++ b/apis/database/v4/oraclerestdataservice_conversion.go @@ -0,0 +1,4 @@ +package v4 + +// Hub defines v1 as the hub version +func (*OracleRestDataService) Hub() {} diff --git a/apis/database/v4/oraclerestdataservice_types.go b/apis/database/v4/oraclerestdataservice_types.go new file mode 100644 index 00000000..20cc7a74 --- /dev/null +++ b/apis/database/v4/oraclerestdataservice_types.go @@ -0,0 +1,158 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// OracleRestDataServiceSpec defines the desired state of OracleRestDataService +type OracleRestDataServiceSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + DatabaseRef string `json:"databaseRef"` + LoadBalancer bool `json:"loadBalancer,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Image OracleRestDataServiceImage `json:"image,omitempty"` + OrdsPassword OracleRestDataServicePassword `json:"ordsPassword"` + AdminPassword OracleRestDataServicePassword `json:"adminPassword"` + OrdsUser string `json:"ordsUser,omitempty"` + RestEnableSchemas []OracleRestDataServiceRestEnableSchemas `json:"restEnableSchemas,omitempty"` + OracleService string `json:"oracleService,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` + Persistence OracleRestDataServicePersistence `json:"persistence,omitempty"` + MongoDbApi bool `json:"mongoDbApi,omitempty"` + + // +k8s:openapi-gen=true + // +kubebuilder:validation:Minimum=1 + Replicas int `json:"replicas,omitempty"` + ReadinessCheckPeriod int 
`json:"readinessCheckPeriod,omitempty"` +} + +// OracleRestDataServicePersistence defines the storage releated params +type OracleRestDataServicePersistence struct { + Size string `json:"size,omitempty"` + StorageClass string `json:"storageClass,omitempty"` + + // +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany + AccessMode string `json:"accessMode,omitempty"` + VolumeName string `json:"volumeName,omitempty"` + SetWritePermissions *bool `json:"setWritePermissions,omitempty"` +} + +// OracleRestDataServiceImage defines the Image source and pullSecrets for POD +type OracleRestDataServiceImage struct { + Version string `json:"version,omitempty"` + PullFrom string `json:"pullFrom"` + PullSecrets string `json:"pullSecrets,omitempty"` +} + +// OracleRestDataServicePassword defines the secret containing Password mapped to secretKey +type OracleRestDataServicePassword struct { + SecretName string `json:"secretName"` + // +kubebuilder:default:="oracle_pwd" + SecretKey string `json:"secretKey,omitempty"` + KeepSecret *bool `json:"keepSecret,omitempty"` +} + +// OracleRestDataServicePDBSchemas defines the PDB Schemas to be ORDS Enabled +type OracleRestDataServiceRestEnableSchemas struct { + PdbName string `json:"pdbName,omitempty"` + SchemaName string `json:"schemaName"` + UrlMapping string `json:"urlMapping,omitempty"` + Enable bool `json:"enable"` +} + +// OracleRestDataServiceStatus defines the observed state of OracleRestDataService +type OracleRestDataServiceStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + Status string `json:"status,omitempty"` + DatabaseApiUrl string `json:"databaseApiUrl,omitempty"` + LoadBalancer string `json:"loadBalancer,omitempty"` + DatabaseRef string `json:"databaseRef,omitempty"` + ServiceIP string `json:"serviceIP,omitempty"` + DatabaseActionsUrl string `json:"databaseActionsUrl,omitempty"` + MongoDbApiAccessUrl string 
`json:"mongoDbApiAccessUrl,omitempty"` + OrdsInstalled bool `json:"ordsInstalled,omitempty"` + ApexConfigured bool `json:"apexConfigured,omitempty"` + ApxeUrl string `json:"apexUrl,omitempty"` + MongoDbApi bool `json:"mongoDbApi,omitempty"` + CommonUsersCreated bool `json:"commonUsersCreated,omitempty"` + Replicas int `json:"replicas,omitempty"` + + Image OracleRestDataServiceImage `json:"image,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" +// +kubebuilder:printcolumn:JSONPath=".spec.databaseRef",name="Database",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.databaseApiUrl",name="Database API URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.databaseActionsUrl",name="Database Actions URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.apexUrl",name="Apex URL",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.mongoDbApiAccessUrl",name="MongoDbApi Access URL",type="string" + +// OracleRestDataService is the Schema for the oraclerestdataservices API +// +kubebuilder:storageversion +type OracleRestDataService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OracleRestDataServiceSpec `json:"spec,omitempty"` + Status OracleRestDataServiceStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// OracleRestDataServiceList contains a list of OracleRestDataService +type OracleRestDataServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OracleRestDataService `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OracleRestDataService{}, &OracleRestDataServiceList{}) +} diff --git a/apis/database/v4/oraclerestdataservice_webhook.go b/apis/database/v4/oraclerestdataservice_webhook.go new file mode 100644 index 00000000..5211528a --- /dev/null +++ 
b/apis/database/v4/oraclerestdataservice_webhook.go @@ -0,0 +1,55 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var oraclerestdataservicelog = logf.Log.WithName("oraclerestdataservice-resource") + +func (r *OracleRestDataService) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/apis/database/v4/ordssrvs_types.go b/apis/database/v4/ordssrvs_types.go new file mode 100644 index 00000000..1fbf820a --- /dev/null +++ b/apis/database/v4/ordssrvs_types.go @@ -0,0 +1,693 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer 
for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OrdsSrvsSpec defines the desired state of OrdsSrvs +// +kubebuilder:resource:shortName="ords" +type OrdsSrvsSpec struct { + // Specifies the desired Kubernetes Workload + //+kubebuilder:validation:Enum=Deployment;StatefulSet;DaemonSet + //+kubebuilder:default=Deployment + WorkloadType string `json:"workloadType,omitempty"` + // Defines the number of desired Replicas when workloadType is Deployment or StatefulSet + //+kubebuilder:validation:Minimum=1 + //+kubebuilder:default=1 + Replicas int32 `json:"replicas,omitempty"` + // Specifies whether to restart pods when Global or Pool configurations change + ForceRestart bool `json:"forceRestart,omitempty"` + // Specifies the ORDS container image + //+kubecbuilder:default=container-registry.oracle.com/database/ords:latest + Image string `json:"image"` + // Specifies the ORDS container image pull policy + //+kubebuilder:validation:Enum=IfNotPresent;Always;Never + //+kubebuilder:default=IfNotPresent + 
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + // Specifies the Secret Name for pulling the ORDS container image + ImagePullSecrets string `json:"imagePullSecrets,omitempty"` + // Contains settings that are configured across the entire ORDS instance. + GlobalSettings GlobalSettings `json:"globalSettings"` + // Contains settings for individual pools/databases + // Private key + EncPrivKey PasswordSecret `json:"encPrivKey,omitempty"` + PoolSettings []*PoolSettings `json:"poolSettings,omitempty"` + // +k8s:openapi-gen=true + +} + +type GlobalSettings struct { + // Specifies the setting to enable or disable metadata caching. + CacheMetadataEnabled *bool `json:"cache.metadata.enabled,omitempty"` + + // Specifies the duration after a GraphQL schema is not accessed from the cache that it expires. + CacheMetadataGraphQLExpireAfterAccess *time.Duration `json:"cache.metadata.graphql.expireAfterAccess,omitempty"` + + // Specifies the duration after a GraphQL schema is cached that it expires and has to be loaded again. + CacheMetadataGraphQLExpireAfterWrite *time.Duration `json:"cache.metadata.graphql.expireAfterWrite,omitempty"` + + // Specifies the setting to determine for how long a metadata record remains in the cache. + // Longer duration means, it takes longer to view the applied changes. + // The formats accepted are based on the ISO-8601 duration format. + CacheMetadataTimeout *time.Duration `json:"cache.metadata.timeout,omitempty"` + + // Specifies the setting to enable or disable JWKS caching. + CacheMetadataJWKSEnabled *bool `json:"cache.metadata.jwks.enabled,omitempty"` + + // Specifies the initial capacity of the JWKS cache. + CacheMetadataJWKSInitialCapacity *int32 `json:"cache.metadata.jwks.initialCapacity,omitempty"` + + // Specifies the maximum capacity of the JWKS cache. 
+ CacheMetadataJWKSMaximumSize *int32 `json:"cache.metadata.jwks.maximumSize,omitempty"` + + // Specifies the duration after a JWK is not accessed from the cache that it expires. + // By default this is disabled. + CacheMetadataJWKSExpireAfterAccess *time.Duration `json:"cache.metadata.jwks.expireAfterAccess,omitempty"` + + // Specifies the duration after a JWK is cached, that is, it expires and has to be loaded again. + CacheMetadataJWKSExpireAfterWrite *time.Duration `json:"cache.metadata.jwks.expireAfterWrite,omitempty"` + + // Specifies whether the Database API is enabled. + DatabaseAPIEnabled *bool `json:"database.api.enabled,omitempty"` + + // Specifies to disable the Database API administration related services. + // Only applicable when Database API is enabled. + DatabaseAPIManagementServicesDisabled *bool `json:"database.api.management.services.disabled,omitempty"` + + // Specifies how long to wait before retrying an invalid pool. + DBInvalidPoolTimeout *time.Duration `json:"db.invalidPoolTimeout,omitempty"` + + // Specifies the maximum join nesting depth limit for GraphQL queries. + FeatureGraphQLMaxNestingDepth *int32 `json:"feature.grahpql.max.nesting.depth,omitempty"` + + // Specifies the name of the HTTP request header that uniquely identifies the request end to end as + // it passes through the various layers of the application stack. + // In Oracle this header is commonly referred to as the ECID (Entity Context ID). + RequestTraceHeaderName string `json:"request.traceHeaderName,omitempty"` + + // Specifies the maximum number of unsuccessful password attempts allowed. + // Enabled by setting a positive integer value. + SecurityCredentialsAttempts *int32 `json:"security.credentials.attempts,omitempty"` + + // Specifies the period to lock the account that has exceeded maximum attempts. + SecurityCredentialsLockTime *time.Duration `json:"security.credentials.lock.time,omitempty"` + + // Specifies the HTTP listen port. 
+ //+kubebuilder:default:=8080 + StandaloneHTTPPort *int32 `json:"standalone.http.port,omitempty"` + + // Specifies the SSL certificate hostname. + StandaloneHTTPSHost string `json:"standalone.https.host,omitempty"` + + // Specifies the HTTPS listen port. + //+kubebuilder:default:=8443 + StandaloneHTTPSPort *int32 `json:"standalone.https.port,omitempty"` + + // Specifies the period for Standalone Mode to wait until it is gracefully shutdown. + StandaloneStopTimeout *time.Duration `json:"standalone.stop.timeout,omitempty"` + + // Specifies whether to display error messages on the browser. + DebugPrintDebugToScreen *bool `json:"debug.printDebugToScreen,omitempty"` + + // Specifies how the HTTP error responses must be formatted. + // html - Force all responses to be in HTML format + // json - Force all responses to be in JSON format + // auto - Automatically determines most appropriate format for the request (default). + ErrorResponseFormat string `json:"error.responseFormat,omitempty"` + + // Specifies the Internet Content Adaptation Protocol (ICAP) port to virus scan files. + // Either icap.port or icap.secure.port are required to have a value. + ICAPPort *int32 `json:"icap.port,omitempty"` + + // Specifies the Internet Content Adaptation Protocol (ICAP) port to virus scan files. + // Either icap.port or icap.secure.port are required to have a value. + // If values for both icap.port and icap.secure.port are provided, then the value of icap.port is ignored. + ICAPSecurePort *int32 `json:"icap.secure.port,omitempty"` + + // Specifies the Internet Content Adaptation Protocol (ICAP) server name or IP address to virus scan files. + // The icap.server is required to have a value. + ICAPServer string `json:"icap.server,omitempty"` + + // Specifies whether procedures are to be logged. + LogProcedure bool `json:"log.procedure,omitempty"` + + // Specifies to enable the API for MongoDB. 
+ //+kubebuider:default=false + MongoEnabled bool `json:"mongo.enabled,omitempty"` + + // Specifies the API for MongoDB listen port. + //+kubebuilder:default:=27017 + MongoPort *int32 `json:"mongo.port,omitempty"` + + // Specifies the maximum idle time for a Mongo connection in milliseconds. + MongoIdleTimeout *time.Duration `json:"mongo.idle.timeout,omitempty"` + + // Specifies the maximum time for a Mongo database operation in milliseconds. + MongoOpTimeout *time.Duration `json:"mongo.op.timeout,omitempty"` + + // If this value is set to true, then the Oracle REST Data Services internal exclusion list is not enforced. + // Oracle recommends that you do not set this value to true. + SecurityDisableDefaultExclusionList *bool `json:"security.disableDefaultExclusionList,omitempty"` + + // Specifies a pattern for procedures, packages, or schema names which are forbidden to be directly executed from a browser. + SecurityExclusionList string `json:"security.exclusionList,omitempty"` + + // Specifies a pattern for procedures, packages, or schema names which are allowed to be directly executed from a browser. + SecurityInclusionList string `json:"security.inclusionList,omitempty"` + + // Specifies the maximum number of cached procedure validations. + // Set this value to 0 to force the validation procedure to be invoked on each request. + SecurityMaxEntries *int32 `json:"security.maxEntries,omitempty"` + + // Specifies whether HTTPS is available in your environment. + SecurityVerifySSL *bool `json:"security.verifySSL,omitempty"` + + // Specifies the context path where ords is located. 
+ //+kubebuilder:default:="/ords" + StandaloneContextPath string `json:"standalone.context.path,omitempty"` + + /************************************************* + * Undocumented + /************************************************/ + + // Specifies that the HTTP Header contains the specified text + // Usually set to 'X-Forwarded-Proto: https' coming from a load-balancer + SecurityHTTPSHeaderCheck string `json:"security.httpsHeaderCheck,omitempty"` + + // Specifies to force HTTPS; this is set to default to false as in real-world TLS should + // terminiate at the LoadBalancer + SecurityForceHTTPS bool `json:"security.forceHTTPS,omitempty"` + + // Specifies to trust Access from originating domains + SecuirtyExternalSessionTrustedOrigins string `json:"security.externalSessionTrustedOrigins,omitempty"` + + /************************************************* + * Customised + /************************************************/ + /* Below are settings with physical path/file locations to be replaced by ConfigMaps/Secrets, Boolean or HardCoded */ + + /* + // Specifies the path to the folder to store HTTP request access logs. + // If not specified, then no access log is generated. + // HARDCODED + // StandaloneAccessLog string `json:"standalone.access.log,omitempty"` + */ + + // Specifies if HTTP request access logs should be enabled + // If enabled, logs will be written to /opt/oracle/sa/log/global + //+kubebuilder:default:=false + EnableStandaloneAccessLog bool `json:"enable.standalone.access.log,omitempty"` + + // Specifies if HTTP request access logs should be enabled + // If enabled, logs will be written to /opt/oracle/sa/log/global + //+kubebuilder:default:=false + EnableMongoAccessLog bool `json:"enable.mongo.access.log,omitempty"` + + /* + //Specifies the SSL certificate path. + // If you are providing the SSL certificate, then you must specify the certificate location. 
+ // Replaced with: CertSecret *CertificateSecret `json:"certSecret,omitempty"` + //StandaloneHTTPSCert string `json:"standalone.https.cert"` + + // Specifies the SSL certificate key path. + // If you are providing the SSL certificate, you must specify the certificate key location. + // Replaced with: CertSecret *CertificateSecret `json:"certSecret,omitempty"` + //StandaloneHTTPSCertKey string `json:"standalone.https.cert.key"` + */ + + // Specifies the Secret containing the SSL Certificates + // Replaces: standalone.https.cert and standalone.https.cert.key + CertSecret *CertificateSecret `json:"certSecret,omitempty"` + + /************************************************* + * Disabled + /************************************************* + // Specifies the comma separated list of host names or IP addresses to identify a specific network + // interface on which to listen. + //+kubebuilder:default:="0.0.0.0" + //StandaloneBinds string `json:"standalone.binds,omitempty"` + // This is disabled as containerised + + // Specifies the file where credentials are stored. + //SecurityCredentialsFile string `json:"security.credentials.file,omitempty"` + // WTF does this do?!?! + + // Points to the location where static resources to be served under the / root server path are located. + // StandaloneDocRoot string `json:"standalone.doc.root,omitempty"` + // Maybe this gets implemented; difficult to predict valid use case + + // Specifies the path to a folder that contains the custom error page. + // ErrorExternalPath string `json:"error.externalPath,omitempty"` + // Can see use-case; but wait for implementation + + // Specifies the Context path where APEX static resources are located. + //+kubebuilder:default:="/i" + // StandaloneStaticContextPath string `json:"standalone.static.context.path,omitempty"` + // Does anyone ever change this? 
If so, need to also change the APEX install configmap to update path + */ + + // Specifies the path to the folder containing static resources required by APEX. + // StandaloneStaticPath string `json:"standalone.static.path,omitempty"` + // This is disabled as will use the container image path (/opt/oracle/apex/$ORDS_VER/images) + // HARDCODED into the entrypoint + + // Specifies a comma separated list of host names or IP addresses to identify a specific + // network interface on which to listen. + //+kubebuilder:default:="0.0.0.0" + // MongoHost string `json:"mongo.host,omitempty"` + // This is disabled as containerised + + // Specifies the path to the folder where you want to store the API for MongoDB access logs. + // MongoAccessLog string `json:"mongo.access.log,omitempty"` + // HARDCODED to global/logs +} + +type PoolSettings struct { + // Specifies the Pool Name + PoolName string `json:"poolName"` + + // Specify whether to perform ORDS installation/upgrades automatically + // The db.adminUser and db.adminUser.secret must be set, otherwise setting is ignored + // This setting will be ignored for ADB + //+kubebuilder:default:=false + AutoUpgradeORDS bool `json:"autoUpgradeORDS,omitempty"` + + // Specify whether to perform APEX installation/upgrades automatically + // The db.adminUser and db.adminUser.secret must be set, otherwise setting is ignored + // This setting will be ignored for ADB + //+kubebuilder:default:=false + AutoUpgradeAPEX bool `json:"autoUpgradeAPEX,omitempty"` + + // Specifies the name of the database user for the connection. + // For non-ADB this will default to ORDS_PUBLIC_USER + // For ADBs this must be specified and not ORDS_PUBLIC_USER + // If ORDS_PUBLIC_USER is specified for an ADB, the workload will fail + //+kubebuilder:default:="ORDS_PUBLIC_USER" + DBUsername string `json:"db.username,omitempty"` + + // Specifies the password of the specified database user. 
+ // Replaced by: DBSecret PasswordSecret `json:"dbSecret"` + // DBPassword struct{} `json:"dbPassword,omitempty"` + + // Specifies the Secret with the dbUsername and dbPassword values + // for the connection. + DBSecret PasswordSecret `json:"db.secret"` + + // Specifies the username for the database account that ORDS uses for administration operations in the database. + DBAdminUser string `json:"db.adminUser,omitempty"` + + // Specifies the password for the database account that ORDS uses for administration operations in the database. + // Replaced by: DBAdminUserSecret PasswordSecret `json:"dbAdminUserSecret,omitempty"` + // DBAdminUserPassword struct{} `json:"db.adminUser.password,omitempty"` + + // Specifies the Secret with the dbAdminUser (SYS) and dbAdminPassword values + // for the database account that ORDS uses for administration operations in the database. + // replaces: db.adminUser.password + DBAdminUserSecret PasswordSecret `json:"db.adminUser.secret,omitempty"` + + // Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management. + DBCDBAdminUser string `json:"db.cdb.adminUser,omitempty"` + + // Specifies the password for the database account that ORDS uses for the Pluggable Database Lifecycle Management. + // Replaced by: DBCdbAdminUserSecret PasswordSecret `json:"dbCdbAdminUserSecret,omitempty"` + // DBCdbAdminUserPassword struct{} `json:"db.cdb.adminUser.password,omitempty"` + + // Specifies the Secret with the dbCdbAdminUser (SYS) and dbCdbAdminPassword values + // Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management. + // Replaces: db.cdb.adminUser.password + DBCDBAdminUserSecret PasswordSecret `json:"db.cdb.adminUser.secret,omitempty"` + + // Specifies the comma delimited list of additional roles to assign authenticated APEX administrator type users. 
+ ApexSecurityAdministratorRoles string `json:"apex.security.administrator.roles,omitempty"` + + // Specifies the comma delimited list of additional roles to assign authenticated regular APEX users. + ApexSecurityUserRoles string `json:"apex.security.user.roles,omitempty"` + + // Specifies the source for database credentials when creating a direct connection for running SQL statements. + // Value can be one of pool or request. + // If the value is pool, then the credentials defined in this pool is used to create a JDBC connection. + // If the value request is used, then the credentials in the request is used to create a JDBC connection and if successful, grants the requestor SQL Developer role. + //+kubebuilder:validation:Enum=pool;request + DBCredentialsSource string `json:"db.credentialsSource,omitempty"` + + // Indicates how long to wait to gracefully destroy a pool before moving to forcefully destroy all connections including borrowed ones. + DBPoolDestroyTimeout *time.Duration `json:"db.poolDestroyTimeout,omitempty"` + + // Specifies to enable tracking of JDBC resources. + // If not released causes in resource leaks or exhaustion in the database. + // Tracking imposes a performance overhead. + DebugTrackResources *bool `json:"debug.trackResources,omitempty"` + + // Specifies to disable the Open Service Broker services available for the pool. + FeatureOpenservicebrokerExclude *bool `json:"feature.openservicebroker.exclude,omitempty"` + + // Specifies to enable the Database Actions feature. + FeatureSDW *bool `json:"feature.sdw,omitempty"` + + // Specifies a comma separated list of HTTP Cookies to exclude when initializing an Oracle Web Agent environment. + HttpCookieFilter string `json:"http.cookie.filter,omitempty"` + + // Identifies the database role that indicates that the database user must get the SQL Administrator role. 
+ JDBCAuthAdminRole string `json:"jdbc.auth.admin.role,omitempty"` + + // Specifies how a pooled JDBC connection and corresponding database session, is released when a request has been processed. + JDBCCleanupMode string `json:"jdbc.cleanup.mode,omitempty"` + + // If it is true, then it causes a trace of the SQL statements performed by Oracle Web Agent to be echoed to the log. + OwaTraceSql *bool `json:"owa.trace.sql,omitempty"` + + // Indicates if the PL/SQL Gateway functionality should be available for a pool or not. + // Value can be one of disabled, direct, or proxied. + // If the value is direct, then the pool serves the PL/SQL Gateway requests directly. + // If the value is proxied, the PLSQL_GATEWAY_CONFIG view is used to determine the user to whom to proxy. + //+kubebuilder:validation:Enum=disabled;direct;proxied + PlsqlGatewayMode string `json:"plsql.gateway.mode,omitempty"` + + // Specifies whether the JWT Profile authentication is available. Supported values: + SecurityJWTProfileEnabled *bool `json:"security.jwt.profile.enabled,omitempty"` + + // Specifies the maximum number of bytes read from the JWK url. + SecurityJWKSSize *int32 `json:"security.jwks.size,omitempty"` + + // Specifies the maximum amount of time before timing-out when accessing a JWK url. + SecurityJWKSConnectionTimeout *time.Duration `json:"security.jwks.connection.timeout,omitempty"` + + // Specifies the maximum amount of time reading a response from the JWK url before timing-out. + SecurityJWKSReadTimeout *time.Duration `json:"security.jwks.read.timeout,omitempty"` + + // Specifies the minimum interval between refreshing the JWK cached value. + SecurityJWKSRefreshInterval *time.Duration `json:"security.jwks.refresh.interval,omitempty"` + + // Specifies the maximum skew the JWT time claims are accepted. + // This is useful if the clock on the JWT issuer and ORDS differs by a few seconds. 
+ SecurityJWTAllowedSkew *time.Duration `json:"security.jwt.allowed.skew,omitempty"` + + // Specifies the maximum allowed age of a JWT in seconds, regardless of expired claim. + // The age of the JWT is taken from the JWT issued at claim. + SecurityJWTAllowedAge *time.Duration `json:"security.jwt.allowed.age,omitempty"` + + // Indicates the type of security.requestValidationFunction: javascript or plsql. + //+kubebuilder:validation:Enum=plsql;javascript + SecurityValidationFunctionType string `json:"security.validationFunctionType,omitempty"` + + // The type of connection. + //+kubebuilder:validation:Enum=basic;tns;customurl + DBConnectionType string `json:"db.connectionType,omitempty"` + + // Specifies the JDBC URL connection to connect to the database. + DBCustomURL string `json:"db.customURL,omitempty"` + + // Specifies the host system for the Oracle database. + DBHostname string `json:"db.hostname,omitempty"` + + // Specifies the database listener port. + DBPort *int32 `json:"db.port,omitempty"` + + // Specifies the network service name of the database. + DBServicename string `json:"db.servicename,omitempty"` + + // Specifies the name of the database. + DBSid string `json:"db.sid,omitempty"` + + // Specifies the TNS alias name that matches the name in the tnsnames.ora file. + DBTnsAliasName string `json:"db.tnsAliasName,omitempty"` + + // Specifies the service name in the wallet archive for the pool. + DBWalletZipService string `json:"db.wallet.zip.service,omitempty"` + + // Specifies the JDBC driver type. + //+kubebuilder:validation:Enum=thin;oci8 + JDBCDriverType string `json:"jdbc.DriverType,omitempty"` + + // Specifies how long an available connection can remain idle before it is closed. The inactivity connection timeout is in seconds. + JDBCInactivityTimeout *int32 `json:"jdbc.InactivityTimeout,omitempty"` + + // Specifies the initial size for the number of connections that will be created. 
+ // The default is low, and should probably be set higher in most production environments. + JDBCInitialLimit *int32 `json:"jdbc.InitialLimit,omitempty"` + + // Specifies the maximum number of times to reuse a connection before it is discarded and replaced with a new connection. + JDBCMaxConnectionReuseCount *int32 `json:"jdbc.MaxConnectionReuseCount,omitempty"` + + // Sets the maximum connection reuse time property. + JDBCMaxConnectionReuseTime *int32 `json:"jdbc.MaxConnectionReuseTime,omitempty"` + + // Sets the time in seconds to trust an idle connection to skip a validation test. + JDBCSecondsToTrustIdleConnection *int32 `json:"jdbc.SecondsToTrustIdleConnection,omitempty"` + + // Specifies the maximum number of connections. + // Might be too low for some production environments. + JDBCMaxLimit *int32 `json:"jdbc.MaxLimit,omitempty"` + + // Specifies if the PL/SQL Gateway calls can be authenticated using database users. + // If the value is true then this feature is enabled. If the value is false, then this feature is disabled. + // Oracle recommends not to use this feature. + // This feature used only to facilitate customers migrating from mod_plsql. + JDBCAuthEnabled *bool `json:"jdbc.auth.enabled,omitempty"` + + // Specifies the maximum number of statements to cache for each connection. + JDBCMaxStatementsLimit *int32 `json:"jdbc.MaxStatementsLimit,omitempty"` + + // Specifies the minimum number of connections. + JDBCMinLimit *int32 `json:"jdbc.MinLimit,omitempty"` + + // Specifies a timeout period on a statement. + // An abnormally long running query or script, executed by a request, may leave it in a hanging state unless a timeout is + // set on the statement. Setting a timeout on the statement ensures that all the queries automatically timeout if + // they are not completed within the specified time period. + JDBCStatementTimeout *int32 `json:"jdbc.statementTimeout,omitempty"` + + // Specifies the default page to display. 
The Oracle REST Data Services Landing Page. + MiscDefaultPage string `json:"misc.defaultPage,omitempty"` + + // Specifies the maximum number of rows that will be returned from a query when processing a RESTful service + // and that will be returned from a nested cursor in a result set. + // Affects all RESTful services generated through a SQL query, regardless of whether the resource is paginated. + MiscPaginationMaxRows *int32 `json:"misc.pagination.maxRows,omitempty"` + + // Specifies the procedure name(s) to execute after executing the procedure specified on the URL. + // Multiple procedure names must be separated by commas. + ProcedurePostProcess string `json:"procedurePostProcess,omitempty"` + + // Specifies the procedure name(s) to execute prior to executing the procedure specified on the URL. + // Multiple procedure names must be separated by commas. + ProcedurePreProcess string `json:"procedure.preProcess,omitempty"` + + // Specifies the function to be invoked prior to dispatching each Oracle REST Data Services based REST Service. + // The function can perform configuration of the database session, perform additional validation or authorization of the request. + // If the function returns true, then processing of the request continues. + // If the function returns false, then processing of the request is aborted and an HTTP 403 Forbidden status is returned. + ProcedureRestPreHook string `json:"procedure.rest.preHook,omitempty"` + + // Specifies an authentication function to determine if the requested procedure in the URL should be allowed or disallowed for processing. + // The function should return true if the procedure is allowed; otherwise, it should return false. + // If it returns false, Oracle REST Data Services will return WWW-Authenticate in the response header. 
+ SecurityRequestAuthenticationFunction string `json:"security.requestAuthenticationFunction,omitempty"` + + // Specifies a validation function to determine if the requested procedure in the URL should be allowed or disallowed for processing. + // The function should return true if the procedure is allowed; otherwise, return false. + //+kubebuilder:default:="ords_util.authorize_plsql_gateway" + SecurityRequestValidationFunction string `json:"security.requestValidationFunction,omitempty"` + + // When using the SODA REST API, specifies the default number of documents returned for a GET request on a collection when a + // limit is not specified in the URL. Must be a positive integer, or "unlimited" for no limit. + SODADefaultLimit string `json:"soda.defaultLimit,omitempty"` + + // When using the SODA REST API, specifies the maximum number of documents that will be returned for a GET request on a collection URL, + // regardless of any limit specified in the URL. Must be a positive integer, or "unlimited" for no limit. + SODAMaxLimit string `json:"soda.maxLimit,omitempty"` + + // Specifies whether the REST-Enabled SQL service is active. + RestEnabledSqlActive *bool `json:"restEnabledSql.active,omitempty"` + + /************************************************* + * Customised + /************************************************/ + /* Below are settings with physical path/file locations to be replaced by ConfigMaps/Secrets, Boolean or HardCoded */ + + /* + // Specifies the wallet archive (provided in BASE64 encoding) containing connection details for the pool. + // Replaced with: DBWalletSecret *DBWalletSecret `json:"dbWalletSecret,omitempty"` + DBWalletZip string `json:"db.wallet.zip,omitempty"` + + // Specifies the path to a wallet archive containing connection details for the pool. + // HARDCODED + DBWalletZipPath string `json:"db.wallet.zip.path,omitempty"` + */ + + // Specifies the Secret containing the wallet archive containing connection details for the pool. 
+ // Replaces: db.wallet.zip + DBWalletSecret *DBWalletSecret `json:"dbWalletSecret,omitempty"` + + /* + // The directory location of your tnsnames.ora file. + // Replaced with: TNSAdminSecret *TNSAdminSecret `json:"tnsAdminSecret,omitempty"` + // DBTnsDirectory string `json:"db.tnsDirectory,omitempty"` + */ + + // Specifies the Secret containing the TNS_ADMIN directory + // Replaces: db.tnsDirectory + TNSAdminSecret *TNSAdminSecret `json:"tnsAdminSecret,omitempty"` + + /************************************************* + * Disabled + /************************************************* + // specifies a configuration setting for AutoUpgrade.jar location. + // AutoupgradeAPIAulocation string `json:"autoupgrade.api.aulocation,omitempty"` + // As of 23.4; AutoUpgrade.jar is not part of the container image + + // Specifies a configuration setting to enable AutoUpgrade REST API features. + // AutoupgradeAPIEnabled *bool `json:"autoupgrade.api.enabled,omitempty"` + // Guess this has to do with autoupgrade.api.aulocation which is not implemented + + // Specifies a configuration setting for AutoUpgrade REST API JVM location. + // AutoupgradeAPIJvmlocation string `json:"autoupgrade.api.jvmlocation,omitempty"` + // Guess this has to do with autoupgrade.api.aulocation which is not implemented + + // Specifies a configuration setting for AutoUpgrade REST API log location. + // AutoupgradeAPILoglocation string `json:"autoupgrade.api.loglocation,omitempty"` + // Guess this has to do with autoupgrade.api.aulocation which is not implemented + + // Specifies that the pool points to a CDB, and that the PDBs connected to that CDB should be made addressable + // by Oracle REST Data Services + // DBServiceNameSuffix string `json:"db.serviceNameSuffix,omitempty"` + // Not sure of use case here?!? 
+ */ +} + +type PriVKey struct { + Secret PasswordSecret `json:"secret"` +} + +// Defines the secret containing Password mapped to secretKey +type PasswordSecret struct { + // Specifies the name of the password Secret + SecretName string `json:"secretName"` + // Specifies the key holding the value of the Secret + //+kubebuilder:default:="password" + PasswordKey string `json:"passwordKey,omitempty"` +} + +// Defines the secret containing Certificates +type CertificateSecret struct { + // Specifies the name of the certificate Secret + SecretName string `json:"secretName"` + // Specifies the Certificate + Certificate string `json:"cert"` + // Specifies the Certificate Key + CertificateKey string `json:"key"` +} + +// Defines the secret containing Certificates +type TNSAdminSecret struct { + // Specifies the name of the TNS_ADMIN Secret + SecretName string `json:"secretName"` +} + +// Defines the secret containing Certificates +type DBWalletSecret struct { + // Specifies the name of the Database Wallet Secret + SecretName string `json:"secretName"` + // Specifies the Secret key name containing the Wallet + WalletName string `json:"walletName"` +} + +// OrdsSrvsStatus defines the observed state of OrdsSrvs +type OrdsSrvsStatus struct { + //** PLACE HOLDER + OrdsInstalled bool `json:"ordsInstalled,omitempty"` + //** PLACE HOLDER + // Indicates the current status of the resource + Status string `json:"status,omitempty"` + // Indicates the current Workload type of the resource + WorkloadType string `json:"workloadType,omitempty"` + // Indicates the ORDS version + ORDSVersion string `json:"ordsVersion,omitempty"` + // Indicates the HTTP port of the resource exposed by the pods + HTTPPort *int32 `json:"httpPort,omitempty"` + // Indicates the HTTPS port of the resource exposed by the pods + HTTPSPort *int32 `json:"httpsPort,omitempty"` + // Indicates the MongoAPI port of the resource exposed by the pods (if enabled) + MongoPort int32 `json:"mongoPort,omitempty"` + // 
Indicates if the resource is out-of-sync with the configuration + RestartRequired bool `json:"restartRequired"` + + // +operator-sdk:csv:customresourcedefinitions:type=status + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:JSONPath=".status.status",name="status",type="string" +//+kubebuilder:printcolumn:JSONPath=".status.workloadType",name="workloadType",type="string" +//+kubebuilder:printcolumn:JSONPath=".status.ordsVersion",name="ordsVersion",type="string" +//+kubebuilder:printcolumn:JSONPath=".status.httpPort",name="httpPort",type="integer" +//+kubebuilder:printcolumn:JSONPath=".status.httpsPort",name="httpsPort",type="integer" +//+kubebuilder:printcolumn:JSONPath=".status.mongoPort",name="MongoPort",type="integer" +//+kubebuilder:printcolumn:JSONPath=".status.restartRequired",name="restartRequired",type="boolean" +//+kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="AGE",type="date" +//+kubebuilder:printcolumn:JSONPath=".status.ordsInstalled",name="OrdsInstalled",type="boolean" +//+kubebuilder:resource:path=ordssrvs,scope=Namespaced + +// OrdsSrvs is the Schema for the ordssrvs API +type OrdsSrvs struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OrdsSrvsSpec `json:"spec,omitempty"` + Status OrdsSrvsStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// OrdsSrvsList contains a list of OrdsSrvs +type OrdsSrvsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OrdsSrvs `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OrdsSrvs{}, &OrdsSrvsList{}) +} diff --git a/apis/database/v1alpha1/pdb_types.go b/apis/database/v4/pdb_types.go similarity index 96% rename from apis/database/v1alpha1/pdb_types.go rename to 
apis/database/v4/pdb_types.go index 8de9db52..16021f12 100644 --- a/apis/database/v1alpha1/pdb_types.go +++ b/apis/database/v4/pdb_types.go @@ -36,7 +36,7 @@ ** SOFTWARE. */ -package v1alpha1 +package v4 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -117,7 +117,9 @@ type PDBSpec struct { // turn on the assertive approach to delete pdb resource // kubectl delete pdb ..... automatically triggers the pluggable database // deletion - AssertivePdbDeletion bool `json:"assertivePdbDeletion,omitempty"` + AssertivePdbDeletion bool `json:"assertivePdbDeletion,omitempty"` + PDBPubKey PDBPUBKEY `json:"pdbOrdsPubKey,omitempty"` + PDBPriKey PDBPRIVKEY `json:"pdbOrdsPrvKey,omitempty"` } // PDBAdminName defines the secret containing Sys Admin User mapped to key 'adminName' for PDB @@ -194,14 +196,15 @@ type PDBStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:printcolumn:JSONPath=".status.connString",name="Connect_String",type="string",description="The connect string to be used" // +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB" // +kubebuilder:printcolumn:JSONPath=".spec.pdbName",name="PDB Name",type="string",description="Name of the PDB" // +kubebuilder:printcolumn:JSONPath=".status.openMode",name="PDB State",type="string",description="PDB Open Mode" // +kubebuilder:printcolumn:JSONPath=".status.totalSize",name="PDB Size",type="string",description="Total Size of the PDB" // +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the PDB Resource" // +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any" +// +kubebuilder:printcolumn:JSONPath=".status.connString",name="Connect_String",type="string",description="The connect string to be used" // +kubebuilder:resource:path=pdbs,scope=Namespaced +// +kubebuilder:storageversion // PDB is the Schema for the pdbs 
API type PDB struct { @@ -221,6 +224,14 @@ type PDBList struct { Items []PDB `json:"items"` } +type PDBPUBKEY struct { + Secret PDBSecret `json:"secret"` +} + +type PDBPRIVKEY struct { + Secret PDBSecret `json:"secret"` +} + func init() { SchemeBuilder.Register(&PDB{}, &PDBList{}) } diff --git a/apis/database/v1alpha1/pdb_webhook.go b/apis/database/v4/pdb_webhook.go similarity index 85% rename from apis/database/v1alpha1/pdb_webhook.go rename to apis/database/v4/pdb_webhook.go index 1577198e..f651accf 100644 --- a/apis/database/v1alpha1/pdb_webhook.go +++ b/apis/database/v4/pdb_webhook.go @@ -40,7 +40,7 @@ ** rcitton 07/14/22 - 33822886 */ -package v1alpha1 +package v4 import ( "reflect" @@ -66,7 +66,7 @@ func (r *PDB) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } -//+kubebuilder:webhook:path=/mutate-database-oracle-com-v1alpha1-pdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v1alpha1,name=mpdb.kb.io,admissionReviewVersions={v1,v1beta1} +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-pdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v4,name=mpdb.kb.io,admissionReviewVersions={v1,v1beta1} var _ webhook.Defaulter = &PDB{} @@ -107,6 +107,7 @@ func (r *PDB) Default() { *r.Spec.AsClone = false pdblog.Info(" - asClone : " + strconv.FormatBool(*(r.Spec.AsClone))) } + } if r.Spec.GetScript == nil { @@ -117,7 +118,7 @@ func (r *PDB) Default() { } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
-//+kubebuilder:webhook:path=/validate-database-oracle-com-v1alpha1-pdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v1alpha1,name=vpdb.kb.io,admissionReviewVersions={v1,v1beta1} +//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-pdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v4,name=vpdb.kb.io,admissionReviewVersions={v1,v1beta1} var _ webhook.Validator = &PDB{} @@ -162,8 +163,19 @@ func (r *PDB) validateAction(allErrs *field.ErrorList) { *allErrs = append(*allErrs, field.Required(field.NewPath("spec").Child("pdbTlsCat"), "Please specify PDB Tls Certificate Authority(secret)")) } + if reflect.ValueOf(r.Spec.PDBPriKey).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbOrdsPrvKey"), "Please specify PDB ORDS Private Key(secret)")) + } switch action { + case "DELETE": + /* BUG 36752336 - LREST OPERATOR - DELETE NON-EXISTENT PDB SHOWS LRPDB CREATED MESSAGE */ + if r.Status.OpenMode == "READ WRITE" { + pdblog.Info("Cannot delete: pdb is open ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + } + r.CheckObjExistence("DELETE", allErrs, r) case "CREATE": if reflect.ValueOf(r.Spec.AdminName).IsZero() { *allErrs = append(*allErrs, @@ -211,6 +223,13 @@ *allErrs = append(*allErrs, field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) } + /* We don't need this check as ords open the pdb before cloning */ + /* + if r.Status.OpenMode == "MOUNTED" { + pdblog.Info("Cannot clone: pdb is mount ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + }
+ */ case "PLUG": if r.Spec.XMLFileName == "" { *allErrs = append(*allErrs, @@ -239,6 +258,11 @@ if *(r.Spec.TDEExport) { r.validateTDEInfo(allErrs) } + if r.Status.OpenMode == "READ WRITE" { + pdblog.Info("Cannot unplug: pdb is open ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + } + r.CheckObjExistence("UNPLUG", allErrs, r) case "MODIFY": if r.Spec.PDBState == "" { *allErrs = append(*allErrs, @@ -248,6 +272,7 @@ *allErrs = append(*allErrs, field.Required(field.NewPath("spec").Child("modifyOption"), "Please specify an option for opening/closing a PDB")) } + r.CheckObjExistence("MODIFY", allErrs, r) } } @@ -333,3 +358,12 @@ } } + +func (r *PDB) CheckObjExistence(action string, allErrs *field.ErrorList, pdb *PDB) { + /* BUG 36752465 - lrest operator - open non-existent pdb creates a lrpdb with status failed */ + pdblog.Info("Action [" + action + "] checking " + pdb.Spec.PDBName + " existence") + if pdb.Status.OpenMode == "" { + *allErrs = append(*allErrs, field.NotFound(field.NewPath("Spec").Child("PDBName"), " "+pdb.Spec.PDBName+" does not exist : action "+action+" failure")) + + } +} diff --git a/apis/database/v4/shardingdatabase_conversion.go b/apis/database/v4/shardingdatabase_conversion.go new file mode 100644 index 00000000..7b2c17ac --- /dev/null +++ b/apis/database/v4/shardingdatabase_conversion.go @@ -0,0 +1,4 @@ +package v4 + +// Hub defines v4 as the hub version +func (*ShardingDatabase) Hub() {} diff --git a/apis/database/v4/shardingdatabase_types.go b/apis/database/v4/shardingdatabase_types.go new file mode 100644 index 00000000..cc01b24d --- /dev/null +++ b/apis/database/v4/shardingdatabase_types.go @@ -0,0 +1,427 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates.
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + "sync" + + "encoding/json" + + "sigs.k8s.io/controller-runtime/pkg/client" + + annsv1 "github.com/oracle/oracle-database-operator/commons/annotations" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// ShardingDatabaseSpec defines the desired state of ShardingDatabase +type ShardingDatabaseSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + Shard []ShardSpec `json:"shard"` + Catalog []CatalogSpec `json:"catalog"` // The catalogSpes accept all the catalog parameters + Gsm []GsmSpec `json:"gsm"` // The GsmSpec will accept all the Gsm parameter + StorageClass string `json:"storageClass,omitempty"` // Optional Accept storage class name + DbImage string `json:"dbImage"` // Accept DB Image name + DbImagePullSecret string `json:"dbImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. + GsmImage string `json:"gsmImage"` // Acccept the GSM image name + GsmImagePullSecret string `json:"gsmImagePullSecret,omitempty"` // Optional The name of an image pull secret in case of a private docker repository. + StagePvcName string `json:"stagePvcName,omitempty"` // the Stagepvc for the backup of cluster + PortMappings []PortMapping `json:"portMappings,omitempty"` // Port mappings for the service that is created. 
The service is created if there is at least + IsDebug bool `json:"isDebug,omitempty"` // Optional parameter to enable logining + IsExternalSvc bool `json:"isExternalSvc,omitempty"` + IsClone bool `json:"isClone,omitempty"` + IsDataGuard bool `json:"isDataGuard,omitempty"` + ScriptsLocation string `json:"scriptsLocation,omitempty"` + IsDeleteOraPvc bool `json:"isDeleteOraPvc,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` + LivenessCheckPeriod int `json:"liveinessCheckPeriod,omitempty"` + ReplicationType string `json:"replicationType,omitempty"` + IsDownloadScripts bool `json:"isDownloadScripts,omitempty"` + InvitedNodeSubnetFlag string `json:"invitedNodeSubnetFlag,omitempty"` + InvitedNodeSubnet string `json:"InvitedNodeSubnet,omitempty"` + ShardingType string `json:"shardingType,omitempty"` + GsmShardSpace []GsmShardSpaceSpec `json:"gsmShardSpace,omitempty"` + GsmShardGroup []GsmShardGroupSpec `json:"gsmShardGroup,omitempty"` + ShardRegion []string `json:"shardRegion,omitempty"` + ShardBuddyRegion string `json:"shardBuddyRegion,omitempty"` + GsmService []GsmServiceSpec `json:"gsmService,omitempty"` + ShardConfigName string `json:"shardConfigName,omitempty"` + GsmDevMode string `json:"gsmDevMode,omitempty"` + DbSecret *SecretDetails `json:"dbSecret,omitempty"` // Secret Name to be used with Shard + IsTdeWallet string `json:"isTdeWallet,omitempty"` + TdeWalletPvc string `json:"tdeWalletPvc,omitempty"` + FssStorageClass string `json:"fssStorageClass,omitempty"` + TdeWalletPvcMountLocation string `json:"tdeWalletPvcMountLocation,omitempty"` + DbEdition string `json:"dbEdition,omitempty"` + TopicId string `json:"topicId,omitempty"` +} + +// To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 +// ShardingDatabaseStatus defines the observed state of ShardingDatabase +type ShardingDatabaseStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // 
Important: Run "make" to regenerate code after modifying this file + + Shard map[string]string `json:"shards,omitempty"` + Catalog map[string]string `json:"catalogs,omitempty"` + + Gsm GsmStatus `json:"gsm,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + CrdStatus []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +type GsmStatus struct { + InternalconnectStr string `json:"internalConnectStr,omitempty"` + ExternalConnectStr string `json:"externalConnectStr,omitempty"` + State string `json:"state,omitempty"` + Shards map[string]string `json:"shards,omitempty"` + Details map[string]string `json:"details,omitempty"` + Services string `json:"services,omitempty"` +} + +type GsmShardDetails struct { + Name string `json:"name,omitempty"` + Available string `json:"available,omitempty"` + State string `json:"State,omitempty"` +} + +type GsmStatusDetails struct { + Name string `json:"name,omitempty"` + K8sInternalSvc string `json:"k8sInternalSvc,omitempty"` + K8sExternalSvc string `json:"k8sExternalSvc,omitempty"` + K8sInternalSvcIP string `json:"k8sInternalIP,omitempty"` + K8sExternalSvcIP string `json:"k8sExternalIP,omitempty"` + Role string `json:"role,omitempty"` + DbPasswordSecret string `json:"dbPasswordSecret"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:JSONPath=".status.gsm.state",name="Gsm State",type=string +//+kubebuilder:printcolumn:JSONPath=".status.gsm.services",name="Services",type=string +//+kubebuilder:printcolumn:JSONPath=".status.gsm.shards",name="shards",type=string,priority=1 + +// ShardingDatabase is the Schema for the shardingdatabases API +// +kubebuilder:resource:path=shardingdatabases,scope=Namespaced +// +kubebuilder:storageversion +type ShardingDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ShardingDatabaseSpec 
`json:"spec,omitempty"` + Status ShardingDatabaseStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// ShardingDatabaseList contains a list of ShardingDatabase +type ShardingDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ShardingDatabase `json:"items"` +} + +// ShardSpec is a specification of Shards for an application deployment. +// +k8s:openapi-gen=true +type ShardSpec struct { + Name string `json:"name"` // Shard name that will be used deploy StatefulSet + StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // Optional Shard Storage Size + EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for Shards + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` //Optional resource requirement for the container. + PvcName string `json:"pvcName,omitempty"` + Label string `json:"label,omitempty"` + // +kubebuilder:validation:Enum=enable;disable;failed;force + IsDelete string `json:"isDelete,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` + PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` + ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + ShardSpace string `json:"shardSpace,omitempty"` + ShardGroup string `json:"shardGroup,omitempty"` + ShardRegion string `json:"shardRegion,omitempty"` + DeployAs string `json:"deployAs,omitempty"` +} + +// CatalogSpec defines the desired state of CatalogSpec +// +k8s:openapi-gen=true +type CatalogSpec struct { + Name string `json:"name"` // Catalog name that will be used deploy StatefulSet + StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // Optional Catalog Storage Size and This parameter will not be used if you use PvcName + EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for Catalog + 
Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` // Optional resource requirement for the container. + PvcName string `json:"pvcName,omitempty"` + Label string `json:"label,omitempty"` + IsDelete string `json:"isDelete,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` + PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` + ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` +} + +// GsmSpec defines the desired state of GsmSpec +// +k8s:openapi-gen=true +type GsmSpec struct { + Name string `json:"name"` // Gsm name that will be used deploy StatefulSet + + //Replicas int32 `json:"replicas,omitempty"` // Gsm Replicas. If you set OraGsmPvcName then it is set default to 1. + EnvVars []EnvironmentVariable `json:"envVars,omitempty"` //Optional Env variables for GSM + StorageSizeInGb int32 `json:"storageSizeInGb,omitempty"` // This parameter will not be used if you use OraGsmPvcName + Resources *corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` // Optional resource requirement for the container. + PvcName string `json:"pvcName,omitempty"` + Label string `json:"label,omitempty"` // Optional GSM Label + IsDelete string `json:"isDelete,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + PvAnnotations map[string]string `json:"pvAnnotations,omitempty"` + PvMatchLabels map[string]string `json:"pvMatchLabels,omitempty"` + ImagePulllPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + Region string `json:"region,omitempty"` + DirectorName string `json:"directorName,omitempty"` +} + +// ShardGroupSpec Specification + +type GsmShardGroupSpec struct { + Name string `json:"name"` // Name of the shardgroup. 
+ Region string `json:"region,omitempty"` + DeployAs string `json:"deployAs,omitempty"` +} + +// ShardSpace Specs +type GsmShardSpaceSpec struct { + Name string `json:"name"` // Name of the shardSpace. + Chunks int `json:"chunks,omitempty"` //chunks is optional + ProtectionMode string `json:"protectionMode,omitempty"` // Data guard protection mode + ShardGroup string `json:"shardGroup,omitempty"` +} + +// Service Definition +type GsmServiceSpec struct { + Name string `json:"name"` // Name of the shardSpace. + Available string `json:"available,omitempty"` + ClbGoal string `json:"clbGoal,omitempty"` + CommitOutcome string `json:"commitOutcome,omitempty"` + DrainTimeout string `json:"drainTimeout,omitempty"` + Dtp string `json:"dtp,omitempty"` + Edition string `json:"edition,omitempty"` + FailoverPrimary string `json:"failoverPrimary,omitempty"` + FailoverRestore string `json:"failoverRestore,omitempty"` + FailoverDelay string `json:"failoverDelay,omitempty"` + FailoverMethod string `json:"failoverMethod,omitempty"` + FailoverRetry string `json:"failoverRetry,omitempty"` + FailoverType string `json:"failoverType,omitempty"` + GdsPool string `json:"gdsPool,omitempty"` + Role string `json:"role,omitempty"` + SessionState string `json:"sessionState,omitempty"` + Lag int `json:"lag,omitempty"` + Locality string `json:"locality,omitempty"` + Notification string `json:"notification,omitempty"` + PdbName string `json:"pdbName,omitempty"` + Policy string `json:"policy,omitempty"` + Preferrred string `json:"preferred,omitempty"` + PreferredAll string `json:"prferredAll,omitempty"` + RegionFailover string `json:"regionFailover,omitempty"` + StopOption string `json:"stopOption,omitempty"` + SqlTrasactionProfile string `json:"sqlTransactionProfile,omitempty"` + TableFamily string `json:"tableFamily,omitempty"` + Retention string `json:"retention,omitempty"` + TfaPolicy string `json:"tfaPolicy,omitempty"` +} + +// Secret Details +type SecretDetails struct { + Name string 
`json:"name"` // Name of the secret. + KeyFileName string `json:"keyFileName,omitempty"` // Name of the key. + NsConfigMap string `json:"nsConfigMap,omitempty"` + NsSecret string `json:"nsSecret,omitempty"` + PwdFileName string `json:"pwdFileName"` + PwdFileMountLocation string `json:"pwdFileMountLocation,omitempty"` + KeyFileMountLocation string `json:"keyFileMountLocation,omitempty"` + KeySecretName string `json:"keySecretName,omitempty"` + EncryptionType string `json:"encryptionType,omitempty"` +} + +// EnvironmentVariable represents a named variable accessible for containers. +// +k8s:openapi-gen=true +type EnvironmentVariable struct { + Name string `json:"name"` // Name of the variable. Must be a C_IDENTIFIER. + Value string `json:"value"` // Value of the variable, as defined in Kubernetes core API. +} + +// PortMapping is a specification of port mapping for an application deployment. +// +k8s:openapi-gen=true +type PortMapping struct { + Port int32 `json:"port"` // Port that will be exposed on the service. + TargetPort int32 `json:"targetPort"` // Docker image port for the application. + Protocol corev1.Protocol `json:"protocol"` // IP protocol for the mapping, e.g., "TCP" or "UDP". 
+} + +type SfsetLabel string + +const ( + ShardingDelLabelKey SfsetLabel = "sharding.oracle.com/delflag" + ShardingDelLabelTrueValue SfsetLabel = "true" + ShardingDelLabelFalseValue SfsetLabel = "false" +) + +type ShardStatusMapKeys string + +const ( + Name ShardStatusMapKeys = "Name" + K8sInternalSvc ShardStatusMapKeys = "K8sInternalSvc" + K8sExternalSvc ShardStatusMapKeys = "K8sExternalSvc" + K8sInternalSvcIP ShardStatusMapKeys = "K8sInternalSvcIP" + K8sExternalSvcIP ShardStatusMapKeys = "K8sExternalSvcIP" + OracleSid ShardStatusMapKeys = "OracleSid" + OraclePdb ShardStatusMapKeys = "OraclePdb" + Role ShardStatusMapKeys = "Role" + DbPasswordSecret ShardStatusMapKeys = "DbPasswordSecret" + State ShardStatusMapKeys = "State" + OpenMode ShardStatusMapKeys = "OpenMode" +) + +type ShardLifecycleState string + +const ( + AvailableState ShardLifecycleState = "AVAILABLE" + FailedState ShardLifecycleState = "FAILED" + UpdateState ShardLifecycleState = "UPDATING" + ProvisionState ShardLifecycleState = "PROVISIONING" + PodNotReadyState ShardLifecycleState = "PODNOTREADY" + PodFailureState ShardLifecycleState = "PODFAILURE" + PodNotFound ShardLifecycleState = "PODNOTFOUND" + StatefulSetFailure ShardLifecycleState = "STATEFULSETFAILURE" + StatefulSetNotFound ShardLifecycleState = "STATEFULSETNOTFOUND" + DeletingState ShardLifecycleState = "DELETING" + DeleteErrorState ShardLifecycleState = "DELETE_ERROR" + ChunkMoveError ShardLifecycleState = "CHUNK_MOVE_ERROR_IN_GSM" + Terminated ShardLifecycleState = "TERMINATED" + LabelPatchingError ShardLifecycleState = "LABELPATCHINGERROR" + DeletePVCError ShardLifecycleState = "DELETEPVCERROR" + AddingShardState ShardLifecycleState = "SHARD_ADDITION" + AddingShardErrorState ShardLifecycleState = "SHARD_ADDITION_ERROR_IN_GSM" + ShardOnlineErrorState ShardLifecycleState = "SHARD_ONLINE_ERROR_IN_GSM" + ShardOnlineState ShardLifecycleState = "ONLINE_SHARD" + ShardRemoveError ShardLifecycleState = "SHARD_DELETE_ERROR_FROM_GSM" +) + +type 
CrdReconcileState string
+
+const (
+	CrdReconcileErrorState     CrdReconcileState = "ReconcileError"
+	CrdReconcileErrorReason    CrdReconcileState = "LastReconcileCycleFailed"
+	CrdReconcileQueuedState    CrdReconcileState = "ReconcileQueued"
+	CrdReconcileQueuedReason   CrdReconcileState = "LastReconcileCycleQueued"
+	CrdReconcileCompeleteState CrdReconcileState = "ReconcileComplete"
+	CrdReconcileCompleteReason CrdReconcileState = "LastReconcileCycleCompleted"
+	CrdReconcileWaitingState   CrdReconcileState = "ReconcileWaiting"
+	CrdReconcileWaitingReason  CrdReconcileState = "LastReconcileCycleWaiting"
+)
+
+// var
+var KubeConfigOnce sync.Once
+
+// #const lastSuccessfulSpec = "lastSuccessfulSpec"
+const lastSuccessfulSpecOnsInfo = "lastSuccessfulSpeOnsInfo"
+
+// GetLastSuccessfulSpec returns spec from the last successful reconciliation.
+// Returns nil, nil if there is no lastSuccessfulSpec.
+func (shardingv1 *ShardingDatabase) GetLastSuccessfulSpec() (*ShardingDatabaseSpec, error) {
+	val, ok := shardingv1.GetAnnotations()[lastSuccessfulSpec]
+	if !ok {
+		return nil, nil
+	}
+
+	specBytes := []byte(val)
+	sucSpec := ShardingDatabaseSpec{}
+
+	err := json.Unmarshal(specBytes, &sucSpec)
+	if err != nil {
+		return nil, err
+	}
+
+	return &sucSpec, nil
+}
+
+// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec.
+func (shardingv1 *ShardingDatabase) UpdateLastSuccessfulSpec(kubeClient client.Client) error {
+	specBytes, err := json.Marshal(shardingv1.Spec)
+	if err != nil {
+		return err
+	}
+
+	anns := map[string]string{
+		lastSuccessfulSpec: string(specBytes),
+	}
+
+	return annsv1.PatchAnnotations(kubeClient, shardingv1, anns)
+}
+
+// GetLastSuccessfulOnsInfo returns spec from the last successful reconciliation.
+// Returns nil, nil if there is no lastSuccessfulSpec.
+func (shardingv1 *ShardingDatabase) GetLastSuccessfulOnsInfo() ([]byte, error) { + val, ok := shardingv1.GetAnnotations()[lastSuccessfulSpecOnsInfo] + if !ok { + return nil, nil + } + specBytes := []byte(val) + return specBytes, nil +} + +// UpdateLastSuccessfulSpec updates lastSuccessfulSpec with the current spec. +func (shardingv1 *ShardingDatabase) UpdateLastSuccessfulSpecOnsInfo(kubeClient client.Client, specBytes []byte) error { + + anns := map[string]string{ + lastSuccessfulSpecOnsInfo: string(specBytes), + } + + return annsv1.PatchAnnotations(kubeClient, shardingv1, anns) +} + +func init() { + SchemeBuilder.Register(&ShardingDatabase{}, &ShardingDatabaseList{}) +} diff --git a/apis/database/v4/shardingdatabase_webhook.go b/apis/database/v4/shardingdatabase_webhook.go new file mode 100644 index 00000000..1ac74d08 --- /dev/null +++ b/apis/database/v4/shardingdatabase_webhook.go @@ -0,0 +1,314 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, 
export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var shardingdatabaselog = logf.Log.WithName("shardingdatabase-resource") + +func (r *ShardingDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+ +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-shardingdatabase,mutating=true,failurePolicy=fail,sideEffects=none,groups=database.oracle.com,resources=shardingdatabases,verbs=create;update,versions=v4,name=mshardingdatabasev4.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &ShardingDatabase{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *ShardingDatabase) Default() { + shardingdatabaselog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. + if r.Spec.GsmDevMode != "" { + r.Spec.GsmDevMode = "dev" + } + + if r.Spec.IsTdeWallet == "" { + r.Spec.IsTdeWallet = "disable" + } + for pindex := range r.Spec.Shard { + if strings.ToLower(r.Spec.Shard[pindex].IsDelete) == "" { + r.Spec.Shard[pindex].IsDelete = "disable" + } + } + +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:verbs=create;update;delete,path=/validate-database-oracle-com-v4-shardingdatabase,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=shardingdatabases,versions=v4,name=vshardingdatabasev4.kb.io,admissionReviewVersions={v1} + +var _ webhook.Validator = &ShardingDatabase{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *ShardingDatabase) ValidateCreate() (admission.Warnings, error) { + shardingdatabaselog.Info("validate create", "name", r.Name) + + // TODO(user): fill in your validation logic upon object creation. 
+ // Check Secret configuration + var validationErr field.ErrorList + var validationErrs1 field.ErrorList + + //namespaces := db.GetWatchNamespaces() + //_, containsNamespace := namespaces[r.Namespace] + // Check if the allowed namespaces maps contains the required namespace + // if len(namespaces) != 0 && !containsNamespace { + // validationErr = append(validationErr, + // field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + // "Oracle database operator doesn't watch over this namespace")) + //} + + if r.Spec.DbSecret == nil { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret"), r.Spec.DbSecret, + "DbSecret cannot be set to nil")) + } else { + if len(r.Spec.DbSecret.Name) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("Name"), r.Spec.DbSecret.Name, + "Secret name cannot be set empty")) + } + if len(r.Spec.DbSecret.PwdFileName) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("PwdFileName"), r.Spec.DbSecret.PwdFileName, + "Password file name cannot be set empty")) + } + if strings.ToLower(r.Spec.DbSecret.EncryptionType) != "base64" { + if strings.ToLower(r.Spec.DbSecret.KeyFileName) == "" { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("KeyFileName"), r.Spec.DbSecret.KeyFileName, + "Key file name cannot be empty")) + } + } + + /** + if len(r.Spec.DbSecret.PwdFileMountLocation) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("PwdFileMountLocation"), r.Spec.DbSecret.PwdFileMountLocation, + "Password file mount location cannot be empty")) + } + + if len(r.Spec.DbSecret.KeyFileMountLocation) == 0 { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("DbSecret").Child("KeyFileMountLocation"), r.Spec.DbSecret.KeyFileMountLocation, + 
"KeyFileMountLocation file mount location cannot be empty")) + } + **/ + } + + if r.Spec.IsTdeWallet == "enable" { + if (len(r.Spec.FssStorageClass) == 0) && (len(r.Spec.TdeWalletPvc) == 0) { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("FssStorageClass"), r.Spec.FssStorageClass, + "FssStorageClass or TdeWalletPvc cannot be set empty if isTdeWallet set to true")) + + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("TdeWalletPvc"), r.Spec.TdeWalletPvc, + "FssStorageClass or TdeWalletPvc cannot be set empty if isTdeWallet set to true")) + } + } + + if r.Spec.IsTdeWallet != "" { + if (strings.ToLower(strings.TrimSpace(r.Spec.IsTdeWallet)) != "enable") && (strings.ToLower(strings.TrimSpace(r.Spec.IsTdeWallet)) != "disable") { + validationErr = append(validationErr, + field.Invalid(field.NewPath("spec").Child("isTdeWallet"), r.Spec.IsTdeWallet, + "isTdeWallet can be set to only \"enable\" or \"disable\"")) + } + } + + validationErrs1 = r.validateShardIsDelete() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateFreeEdition() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateCatalogName() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + validationErrs1 = r.validateShardName() + if validationErrs1 != nil { + validationErr = append(validationErr, validationErrs1...) + } + + // TODO(user): fill in your validation logic upon object creation. 
+	if len(validationErr) == 0 {
+		return nil, nil
+	}
+
+	return nil, apierrors.NewInvalid(
+		schema.GroupKind{Group: "database.oracle.com", Kind: "ShardingDatabase"},
+		r.Name, validationErr)
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (r *ShardingDatabase) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+	shardingdatabaselog.Info("validate update", "name", r.Name)
+
+	// TODO(user): fill in your validation logic upon object update.
+	return nil, nil
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
+func (r *ShardingDatabase) ValidateDelete() (admission.Warnings, error) {
+	shardingdatabaselog.Info("validate delete", "name", r.Name)
+
+	// TODO(user): fill in your validation logic upon object deletion.
+	return nil, nil
+}
+
+// ###### Validation Block #################
+
+func (r *ShardingDatabase) validateShardIsDelete() field.ErrorList {
+
+	var validationErrs field.ErrorList
+
+	for pindex := range r.Spec.Shard {
+		if (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "enable") && (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "disable") && (strings.ToLower(strings.TrimSpace(r.Spec.Shard[pindex].IsDelete)) != "failed") {
+			validationErrs = append(validationErrs,
+				field.Invalid(field.NewPath("spec").Child("shard").Child("isDelete"), r.Spec.Shard[pindex].IsDelete,
+					"r.Spec.Shard[pindex].IsDelete can be set to only enable|disable|failed"))
+		}
+	}
+
+	if len(validationErrs) > 0 {
+		return validationErrs
+	}
+	return nil
+}
+
+func (r *ShardingDatabase) validateFreeEdition() field.ErrorList {
+
+	var validationErrs field.ErrorList
+	if strings.ToLower(r.Spec.DbEdition) == "free" {
+		// Shard Spec Checks
+		for i := 0; i < len(r.Spec.Shard); i++ {
+			for index, variable := range r.Spec.Shard[i].EnvVars {
+				if variable.Name == "ORACLE_SID" {
+					if strings.ToLower(variable.Value) != "free" {
+
validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("shard").Child("EnvVars"), r.Spec.Shard[i].EnvVars[index].Name, + "r.Spec.Shard[i].EnvVars[index].Name ORACLE_SID value can only be set to free")) + } + } + if variable.Name == "ORACLE_PDB" { + if strings.ToLower(variable.Value) != "freepdb" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("shard").Child("EnvVars"), r.Spec.Shard[i].EnvVars[index].Name, + "r.Spec.Shard[i].EnvVars[index].Name ORACLE_PDB value can only be set to freepdb")) + } + } + } + } + // Catalog Spec Checks + for i := 0; i < len(r.Spec.Catalog); i++ { + for index, variable := range r.Spec.Catalog[i].EnvVars { + if variable.Name == "ORACLE_SID" { + if strings.ToLower(variable.Value) != "free" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("catalog").Child("EnvVars"), r.Spec.Catalog[i].EnvVars[index].Name, + "r.Spec.Catalog[i].EnvVars[index].Name ORACLE_SID value can only be set to free")) + } + } + if variable.Name == "ORACLE_PDB" { + if strings.ToLower(variable.Value) != "freepdb" { + validationErrs = append(validationErrs, field.Invalid(field.NewPath("spec").Child("catalog").Child("EnvVars"), r.Spec.Catalog[i].EnvVars[index].Name, + "r.Spec.Catalog[i].EnvVars[index].Name ORACLE_PDB value can only be set to freepdb")) + } + } + } + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateShardName() field.ErrorList { + var validationErrs field.ErrorList + + for pindex := range r.Spec.Shard { + if len(r.Spec.Shard[pindex].Name) > 9 { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("shard").Child("Name"), r.Spec.Shard[pindex].Name, + "Shard Name cannot be greater than 9 characters.")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} + +func (r *ShardingDatabase) validateCatalogName() field.ErrorList { + var 
validationErrs field.ErrorList + + for pindex := range r.Spec.Catalog { + if len(r.Spec.Catalog[pindex].Name) > 9 { + validationErrs = append(validationErrs, + field.Invalid(field.NewPath("spec").Child("catalog").Child("Name"), r.Spec.Catalog[pindex].Name, + "Catalog Name cannot be greater than 9 characters.")) + } + } + + if len(validationErrs) > 0 { + return validationErrs + } + return nil +} diff --git a/apis/database/v4/singleinstancedatabase_conversion.go b/apis/database/v4/singleinstancedatabase_conversion.go new file mode 100644 index 00000000..93638482 --- /dev/null +++ b/apis/database/v4/singleinstancedatabase_conversion.go @@ -0,0 +1,4 @@ +package v4 + +// Hub defines v1 as the hub version +func (*SingleInstanceDatabase) Hub() {} diff --git a/apis/database/v4/singleinstancedatabase_types.go b/apis/database/v4/singleinstancedatabase_types.go new file mode 100644 index 00000000..4f4836d7 --- /dev/null +++ b/apis/database/v4/singleinstancedatabase_types.go @@ -0,0 +1,231 @@ +/* +** Copyright (c) 2023 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// SingleInstanceDatabaseSpec defines the desired state of SingleInstanceDatabase +type SingleInstanceDatabaseSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // +kubebuilder:validation:Enum=standard;enterprise;express;free + Edition string `json:"edition,omitempty"` + + // SID must be alphanumeric (no special characters, only a-z, A-Z, 0-9), and no longer than 12 characters. + // +k8s:openapi-gen=true + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]+$` + // +kubebuilder:validation:MaxLength:=12 + Sid string `json:"sid,omitempty"` + Charset string `json:"charset,omitempty"` + Pdbname string `json:"pdbName,omitempty"` + LoadBalancer bool `json:"loadBalancer,omitempty"` + ListenerPort int `json:"listenerPort,omitempty"` + TcpsListenerPort int `json:"tcpsListenerPort,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + FlashBack *bool `json:"flashBack,omitempty"` + ArchiveLog *bool `json:"archiveLog,omitempty"` + ForceLogging *bool `json:"forceLog,omitempty"` + EnableTCPS bool `json:"enableTCPS,omitempty"` + TcpsCertRenewInterval string `json:"tcpsCertRenewInterval,omitempty"` + TcpsTlsSecret string `json:"tcpsTlsSecret,omitempty"` + + PrimaryDatabaseRef string `json:"primaryDatabaseRef,omitempty"` + // +kubebuilder:validation:Enum=primary;standby;clone;truecache + CreateAs string `json:"createAs,omitempty"` + ReadinessCheckPeriod int `json:"readinessCheckPeriod,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` + TrueCacheServices []string `json:"trueCacheServices,omitempty"` + + // +k8s:openapi-gen=true + Replicas int `json:"replicas,omitempty"` + + 
NodeSelector map[string]string `json:"nodeSelector,omitempty"` + AdminPassword SingleInstanceDatabaseAdminPassword `json:"adminPassword,omitempty"` + Image SingleInstanceDatabaseImage `json:"image"` + Persistence SingleInstanceDatabasePersistence `json:"persistence,omitempty"` + InitParams *SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` + Resources SingleInstanceDatabaseResources `json:"resources,omitempty"` + + ConvertToSnapshotStandby bool `json:"convertToSnapshotStandby,omitempty"` +} + +type SingleInstanceDatabaseResource struct { + Cpu string `json:"cpu,omitempty"` + Memory string `json:"memory,omitempty"` +} + +type SingleInstanceDatabaseResources struct { + Requests *SingleInstanceDatabaseResource `json:"requests,omitempty"` + Limits *SingleInstanceDatabaseResource `json:"limits,omitempty"` +} + +// SingleInstanceDatabasePersistence defines the storage size and class for PVC +type SingleInstanceDatabasePersistence struct { + Size string `json:"size,omitempty"` + StorageClass string `json:"storageClass,omitempty"` + // +kubebuilder:validation:Enum=ReadWriteOnce;ReadWriteMany + AccessMode string `json:"accessMode,omitempty"` + DatafilesVolumeName string `json:"datafilesVolumeName,omitempty"` + ScriptsVolumeName string `json:"scriptsVolumeName,omitempty"` + VolumeClaimAnnotation string `json:"volumeClaimAnnotation,omitempty"` + SetWritePermissions *bool `json:"setWritePermissions,omitempty"` +} + +// SingleInstanceDatabaseInitParams defines the Init Parameters +type SingleInstanceDatabaseInitParams struct { + SgaTarget int `json:"sgaTarget,omitempty"` + PgaAggregateTarget int `json:"pgaAggregateTarget,omitempty"` + CpuCount int `json:"cpuCount,omitempty"` + Processes int `json:"processes,omitempty"` +} + +// SingleInstanceDatabaseImage defines the Image source and pullSecrets for POD +type SingleInstanceDatabaseImage struct { + Version string `json:"version,omitempty"` + PullFrom string `json:"pullFrom"` + PullSecrets string 
`json:"pullSecrets,omitempty"`
+	PrebuiltDB bool `json:"prebuiltDB,omitempty"`
+}
+
+// SingleInstanceDatabaseAdminPassword defines the secret containing Admin Password mapped to secretKey for Database
+type SingleInstanceDatabaseAdminPassword struct {
+	SecretName string `json:"secretName"`
+	// +kubebuilder:default:="oracle_pwd"
+	SecretKey  string `json:"secretKey,omitempty"`
+	KeepSecret *bool  `json:"keepSecret,omitempty"`
+}
+
+// SingleInstanceDatabaseStatus defines the observed state of SingleInstanceDatabase
+type SingleInstanceDatabaseStatus struct {
+	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	Nodes         []string `json:"nodes,omitempty"`
+	Role          string   `json:"role,omitempty"`
+	Status        string   `json:"status,omitempty"`
+	Replicas      int      `json:"replicas,omitempty"`
+	ReleaseUpdate string   `json:"releaseUpdate,omitempty"`
+	DgBroker      *string  `json:"dgBroker,omitempty"`
+	// +kubebuilder:default:="false"
+	DatafilesPatched     string            `json:"datafilesPatched,omitempty"`
+	ConnectString        string            `json:"connectString,omitempty"`
+	ClusterConnectString string            `json:"clusterConnectString,omitempty"`
+	TcpsConnectString    string            `json:"tcpsConnectString,omitempty"`
+	StandbyDatabases     map[string]string `json:"standbyDatabases,omitempty"`
+	// +kubebuilder:default:="false"
+	DatafilesCreated string `json:"datafilesCreated,omitempty"`
+	Sid              string `json:"sid,omitempty"`
+	Edition          string `json:"edition,omitempty"`
+	Charset          string `json:"charset,omitempty"`
+	Pdbname          string `json:"pdbName,omitempty"`
+	InitSgaSize      int    `json:"initSgaSize,omitempty"`
+	InitPgaSize      int    `json:"initPgaSize,omitempty"`
+	CreatedAs        string `json:"createdAs,omitempty"`
+	FlashBack        string `json:"flashBack,omitempty"`
+	ArchiveLog       string `json:"archiveLog,omitempty"`
+	ForceLogging     string `json:"forceLog,omitempty"`
+	OemExpressUrl    string `json:"oemExpressUrl,omitempty"`
+	OrdsReference    string
`json:"ordsReference,omitempty"` + PdbConnectString string `json:"pdbConnectString,omitempty"` + TcpsPdbConnectString string `json:"tcpsPdbConnectString,omitempty"` + ApexInstalled bool `json:"apexInstalled,omitempty"` + PrebuiltDB bool `json:"prebuiltDB,omitempty"` + // +kubebuilder:default:=false + IsTcpsEnabled bool `json:"isTcpsEnabled"` + CertCreationTimestamp string `json:"certCreationTimestamp,omitempty"` + CertRenewInterval string `json:"certRenewInterval,omitempty"` + ClientWalletLoc string `json:"clientWalletLoc,omitempty"` + PrimaryDatabase string `json:"primaryDatabase,omitempty"` + // +kubebuilder:default:="" + TcpsTlsSecret string `json:"tcpsTlsSecret"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + InitParams SingleInstanceDatabaseInitParams `json:"initParams,omitempty"` + Persistence SingleInstanceDatabasePersistence `json:"persistence"` + + ConvertToSnapshotStandby bool `json:"convertToSnapshotStandby,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas +// +kubebuilder:printcolumn:JSONPath=".status.edition",name="Edition",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.sid",name="Sid",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.role",name="Role",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.releaseUpdate",name="Version",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.connectString",name="Connect Str",type="string" +// +kubebuilder:printcolumn:JSONPath=".status.pdbConnectString",name="Pdb Connect Str",type="string",priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.tcpsConnectString",name="TCPS Connect Str",type="string" 
+// +kubebuilder:printcolumn:JSONPath=".status.tcpsPdbConnectString",name="TCPS Pdb Connect Str",type="string", priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.oemExpressUrl",name="Oem Express Url",type="string" + +// SingleInstanceDatabase is the Schema for the singleinstancedatabases API +// +kubebuilder:storageversion +type SingleInstanceDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SingleInstanceDatabaseSpec `json:"spec,omitempty"` + Status SingleInstanceDatabaseStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// SingleInstanceDatabaseList contains a list of SingleInstanceDatabase +type SingleInstanceDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SingleInstanceDatabase `json:"items"` +} + +func init() { + SchemeBuilder.Register(&SingleInstanceDatabase{}, &SingleInstanceDatabaseList{}) +} diff --git a/apis/database/v4/singleinstancedatabase_webhook.go b/apis/database/v4/singleinstancedatabase_webhook.go new file mode 100644 index 00000000..b327d7d4 --- /dev/null +++ b/apis/database/v4/singleinstancedatabase_webhook.go @@ -0,0 +1,55 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +// log is for logging in this package. +var singleinstancedatabaselog = logf.Log.WithName("singleinstancedatabase-resource") + +func (r *SingleInstanceDatabase) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! diff --git a/apis/database/v4/zz_generated.deepcopy.go b/apis/database/v4/zz_generated.deepcopy.go new file mode 100644 index 00000000..4eb9425d --- /dev/null +++ b/apis/database/v4/zz_generated.deepcopy.go @@ -0,0 +1,4213 @@ +//go:build !ignore_autogenerated + +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + timex "time" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AcdSpec) DeepCopyInto(out *AcdSpec) { + *out = *in + in.K8sAcd.DeepCopyInto(&out.K8sAcd) + in.OciAcd.DeepCopyInto(&out.OciAcd) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AcdSpec. +func (in *AcdSpec) DeepCopy() *AcdSpec { + if in == nil { + return nil + } + out := new(AcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminpdbPass) DeepCopyInto(out *AdminpdbPass) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminpdbPass. 
+func (in *AdminpdbPass) DeepCopy() *AdminpdbPass { + if in == nil { + return nil + } + out := new(AdminpdbPass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdminpdbUser) DeepCopyInto(out *AdminpdbUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdminpdbUser. +func (in *AdminpdbUser) DeepCopy() *AdminpdbUser { + if in == nil { + return nil + } + out := new(AdminpdbUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousContainerDatabase) DeepCopyInto(out *AutonomousContainerDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabase. +func (in *AutonomousContainerDatabase) DeepCopy() *AutonomousContainerDatabase { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousContainerDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousContainerDatabaseList) DeepCopyInto(out *AutonomousContainerDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousContainerDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabaseList. +func (in *AutonomousContainerDatabaseList) DeepCopy() *AutonomousContainerDatabaseList { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousContainerDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousContainerDatabaseSpec) DeepCopyInto(out *AutonomousContainerDatabaseSpec) { + *out = *in + if in.AutonomousContainerDatabaseOCID != nil { + in, out := &in.AutonomousContainerDatabaseOCID, &out.AutonomousContainerDatabaseOCID + *out = new(string) + **out = **in + } + if in.CompartmentOCID != nil { + in, out := &in.CompartmentOCID, &out.CompartmentOCID + *out = new(string) + **out = **in + } + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.AutonomousExadataVMClusterOCID != nil { + in, out := &in.AutonomousExadataVMClusterOCID, &out.AutonomousExadataVMClusterOCID + *out = new(string) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.OCIConfig.DeepCopyInto(&out.OCIConfig) + if in.HardLink != nil { + in, out := &in.HardLink, &out.HardLink + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabaseSpec. +func (in *AutonomousContainerDatabaseSpec) DeepCopy() *AutonomousContainerDatabaseSpec { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousContainerDatabaseStatus) DeepCopyInto(out *AutonomousContainerDatabaseStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousContainerDatabaseStatus. 
+func (in *AutonomousContainerDatabaseStatus) DeepCopy() *AutonomousContainerDatabaseStatus { + if in == nil { + return nil + } + out := new(AutonomousContainerDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabase) DeepCopyInto(out *AutonomousDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabase. +func (in *AutonomousDatabase) DeepCopy() *AutonomousDatabase { + if in == nil { + return nil + } + out := new(AutonomousDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseBackup) DeepCopyInto(out *AutonomousDatabaseBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackup. +func (in *AutonomousDatabaseBackup) DeepCopy() *AutonomousDatabaseBackup { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AutonomousDatabaseBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseBackupList) DeepCopyInto(out *AutonomousDatabaseBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousDatabaseBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackupList. +func (in *AutonomousDatabaseBackupList) DeepCopy() *AutonomousDatabaseBackupList { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabaseBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousDatabaseBackupSpec) DeepCopyInto(out *AutonomousDatabaseBackupSpec) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.AutonomousDatabaseBackupOCID != nil { + in, out := &in.AutonomousDatabaseBackupOCID, &out.AutonomousDatabaseBackupOCID + *out = new(string) + **out = **in + } + if in.IsLongTermBackup != nil { + in, out := &in.IsLongTermBackup, &out.IsLongTermBackup + *out = new(bool) + **out = **in + } + if in.RetentionPeriodInDays != nil { + in, out := &in.RetentionPeriodInDays, &out.RetentionPeriodInDays + *out = new(int) + **out = **in + } + in.OCIConfig.DeepCopyInto(&out.OCIConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackupSpec. +func (in *AutonomousDatabaseBackupSpec) DeepCopy() *AutonomousDatabaseBackupSpec { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseBackupStatus) DeepCopyInto(out *AutonomousDatabaseBackupStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBackupStatus. +func (in *AutonomousDatabaseBackupStatus) DeepCopy() *AutonomousDatabaseBackupStatus { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousDatabaseBase) DeepCopyInto(out *AutonomousDatabaseBase) { + *out = *in + if in.CompartmentId != nil { + in, out := &in.CompartmentId, &out.CompartmentId + *out = new(string) + **out = **in + } + in.AutonomousContainerDatabase.DeepCopyInto(&out.AutonomousContainerDatabase) + if in.DisplayName != nil { + in, out := &in.DisplayName, &out.DisplayName + *out = new(string) + **out = **in + } + if in.DbName != nil { + in, out := &in.DbName, &out.DbName + *out = new(string) + **out = **in + } + if in.DbVersion != nil { + in, out := &in.DbVersion, &out.DbVersion + *out = new(string) + **out = **in + } + if in.DataStorageSizeInTBs != nil { + in, out := &in.DataStorageSizeInTBs, &out.DataStorageSizeInTBs + *out = new(int) + **out = **in + } + if in.CpuCoreCount != nil { + in, out := &in.CpuCoreCount, &out.CpuCoreCount + *out = new(int) + **out = **in + } + if in.ComputeCount != nil { + in, out := &in.ComputeCount, &out.ComputeCount + *out = new(float32) + **out = **in + } + if in.OcpuCount != nil { + in, out := &in.OcpuCount, &out.OcpuCount + *out = new(float32) + **out = **in + } + in.AdminPassword.DeepCopyInto(&out.AdminPassword) + if in.IsAutoScalingEnabled != nil { + in, out := &in.IsAutoScalingEnabled, &out.IsAutoScalingEnabled + *out = new(bool) + **out = **in + } + if in.IsDedicated != nil { + in, out := &in.IsDedicated, &out.IsDedicated + *out = new(bool) + **out = **in + } + if in.IsFreeTier != nil { + in, out := &in.IsFreeTier, &out.IsFreeTier + *out = new(bool) + **out = **in + } + if in.IsAccessControlEnabled != nil { + in, out := &in.IsAccessControlEnabled, &out.IsAccessControlEnabled + *out = new(bool) + **out = **in + } + if in.WhitelistedIps != nil { + in, out := &in.WhitelistedIps, &out.WhitelistedIps + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SubnetId != nil { + in, out := &in.SubnetId, &out.SubnetId + *out = new(string) + **out = **in + } + if in.NsgIds != nil { + in, out := &in.NsgIds, &out.NsgIds + *out = 
make([]string, len(*in)) + copy(*out, *in) + } + if in.PrivateEndpointLabel != nil { + in, out := &in.PrivateEndpointLabel, &out.PrivateEndpointLabel + *out = new(string) + **out = **in + } + if in.IsMtlsConnectionRequired != nil { + in, out := &in.IsMtlsConnectionRequired, &out.IsMtlsConnectionRequired + *out = new(bool) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseBase. +func (in *AutonomousDatabaseBase) DeepCopy() *AutonomousDatabaseBase { + if in == nil { + return nil + } + out := new(AutonomousDatabaseBase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseClone) DeepCopyInto(out *AutonomousDatabaseClone) { + *out = *in + in.AutonomousDatabaseBase.DeepCopyInto(&out.AutonomousDatabaseBase) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseClone. +func (in *AutonomousDatabaseClone) DeepCopy() *AutonomousDatabaseClone { + if in == nil { + return nil + } + out := new(AutonomousDatabaseClone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseDetails) DeepCopyInto(out *AutonomousDatabaseDetails) { + *out = *in + in.AutonomousDatabaseBase.DeepCopyInto(&out.AutonomousDatabaseBase) + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseDetails. 
+func (in *AutonomousDatabaseDetails) DeepCopy() *AutonomousDatabaseDetails { + if in == nil { + return nil + } + out := new(AutonomousDatabaseDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseList) DeepCopyInto(out *AutonomousDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseList. +func (in *AutonomousDatabaseList) DeepCopy() *AutonomousDatabaseList { + if in == nil { + return nil + } + out := new(AutonomousDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseRestore) DeepCopyInto(out *AutonomousDatabaseRestore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestore. +func (in *AutonomousDatabaseRestore) DeepCopy() *AutonomousDatabaseRestore { + if in == nil { + return nil + } + out := new(AutonomousDatabaseRestore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AutonomousDatabaseRestore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseRestoreList) DeepCopyInto(out *AutonomousDatabaseRestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AutonomousDatabaseRestore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestoreList. +func (in *AutonomousDatabaseRestoreList) DeepCopy() *AutonomousDatabaseRestoreList { + if in == nil { + return nil + } + out := new(AutonomousDatabaseRestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AutonomousDatabaseRestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseRestoreSpec) DeepCopyInto(out *AutonomousDatabaseRestoreSpec) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + in.Source.DeepCopyInto(&out.Source) + in.OCIConfig.DeepCopyInto(&out.OCIConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestoreSpec. +func (in *AutonomousDatabaseRestoreSpec) DeepCopy() *AutonomousDatabaseRestoreSpec { + if in == nil { + return nil + } + out := new(AutonomousDatabaseRestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutonomousDatabaseRestoreStatus) DeepCopyInto(out *AutonomousDatabaseRestoreStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseRestoreStatus. +func (in *AutonomousDatabaseRestoreStatus) DeepCopy() *AutonomousDatabaseRestoreStatus { + if in == nil { + return nil + } + out := new(AutonomousDatabaseRestoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseSpec) DeepCopyInto(out *AutonomousDatabaseSpec) { + *out = *in + in.Details.DeepCopyInto(&out.Details) + in.Clone.DeepCopyInto(&out.Clone) + in.Wallet.DeepCopyInto(&out.Wallet) + in.OciConfig.DeepCopyInto(&out.OciConfig) + if in.HardLink != nil { + in, out := &in.HardLink, &out.HardLink + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseSpec. +func (in *AutonomousDatabaseSpec) DeepCopy() *AutonomousDatabaseSpec { + if in == nil { + return nil + } + out := new(AutonomousDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutonomousDatabaseStatus) DeepCopyInto(out *AutonomousDatabaseStatus) { + *out = *in + if in.AllConnectionStrings != nil { + in, out := &in.AllConnectionStrings, &out.AllConnectionStrings + *out = make([]ConnectionStringProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutonomousDatabaseStatus. 
+func (in *AutonomousDatabaseStatus) DeepCopy() *AutonomousDatabaseStatus { + if in == nil { + return nil + } + out := new(AutonomousDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backupconfig) DeepCopyInto(out *Backupconfig) { + *out = *in + if in.AutoBackupEnabled != nil { + in, out := &in.AutoBackupEnabled, &out.AutoBackupEnabled + *out = new(bool) + **out = **in + } + if in.RecoveryWindowsInDays != nil { + in, out := &in.RecoveryWindowsInDays, &out.RecoveryWindowsInDays + *out = new(int) + **out = **in + } + if in.AutoBackupWindow != nil { + in, out := &in.AutoBackupWindow, &out.AutoBackupWindow + *out = new(string) + **out = **in + } + if in.BackupDestinationDetails != nil { + in, out := &in.BackupDestinationDetails, &out.BackupDestinationDetails + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backupconfig. +func (in *Backupconfig) DeepCopy() *Backupconfig { + if in == nil { + return nil + } + out := new(Backupconfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDB) DeepCopyInto(out *CDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDB. +func (in *CDB) DeepCopy() *CDB { + if in == nil { + return nil + } + out := new(CDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBAdminPassword) DeepCopyInto(out *CDBAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminPassword. +func (in *CDBAdminPassword) DeepCopy() *CDBAdminPassword { + if in == nil { + return nil + } + out := new(CDBAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBAdminUser) DeepCopyInto(out *CDBAdminUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminUser. +func (in *CDBAdminUser) DeepCopy() *CDBAdminUser { + if in == nil { + return nil + } + out := new(CDBAdminUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBList) DeepCopyInto(out *CDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBList. +func (in *CDBList) DeepCopy() *CDBList { + if in == nil { + return nil + } + out := new(CDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBPRIVKEY) DeepCopyInto(out *CDBPRIVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBPRIVKEY. +func (in *CDBPRIVKEY) DeepCopy() *CDBPRIVKEY { + if in == nil { + return nil + } + out := new(CDBPRIVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBPUBKEY) DeepCopyInto(out *CDBPUBKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBPUBKEY. +func (in *CDBPUBKEY) DeepCopy() *CDBPUBKEY { + if in == nil { + return nil + } + out := new(CDBPUBKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBSecret) DeepCopyInto(out *CDBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSecret. +func (in *CDBSecret) DeepCopy() *CDBSecret { + if in == nil { + return nil + } + out := new(CDBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CDBSpec) DeepCopyInto(out *CDBSpec) { + *out = *in + out.SysAdminPwd = in.SysAdminPwd + out.CDBAdminUser = in.CDBAdminUser + out.CDBAdminPwd = in.CDBAdminPwd + out.CDBTlsKey = in.CDBTlsKey + out.CDBTlsCrt = in.CDBTlsCrt + out.ORDSPwd = in.ORDSPwd + out.WebServerUser = in.WebServerUser + out.WebServerPwd = in.WebServerPwd + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.CDBPubKey = in.CDBPubKey + out.CDBPriKey = in.CDBPriKey +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSpec. +func (in *CDBSpec) DeepCopy() *CDBSpec { + if in == nil { + return nil + } + out := new(CDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBStatus) DeepCopyInto(out *CDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBStatus. +func (in *CDBStatus) DeepCopy() *CDBStatus { + if in == nil { + return nil + } + out := new(CDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBSysAdminPassword) DeepCopyInto(out *CDBSysAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSysAdminPassword. +func (in *CDBSysAdminPassword) DeepCopy() *CDBSysAdminPassword { + if in == nil { + return nil + } + out := new(CDBSysAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CDBTLSCRT) DeepCopyInto(out *CDBTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSCRT. +func (in *CDBTLSCRT) DeepCopy() *CDBTLSCRT { + if in == nil { + return nil + } + out := new(CDBTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBTLSKEY) DeepCopyInto(out *CDBTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSKEY. +func (in *CDBTLSKEY) DeepCopy() *CDBTLSKEY { + if in == nil { + return nil + } + out := new(CDBTLSKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSpec) DeepCopyInto(out *CatalogSpec) { + *out = *in + if in.EnvVars != nil { + in, out := &in.EnvVars, &out.EnvVars + *out = make([]EnvironmentVariable, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvAnnotations != nil { + in, out := &in.PvAnnotations, &out.PvAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvMatchLabels != nil { + in, out := &in.PvMatchLabels, &out.PvMatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ImagePulllPolicy != nil { + in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy + *out = new(corev1.PullPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy 
function, copying the receiver, creating a new CatalogSpec. +func (in *CatalogSpec) DeepCopy() *CatalogSpec { + if in == nil { + return nil + } + out := new(CatalogSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSecret) DeepCopyInto(out *CertificateSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSecret. +func (in *CertificateSecret) DeepCopy() *CertificateSecret { + if in == nil { + return nil + } + out := new(CertificateSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionStringProfile) DeepCopyInto(out *ConnectionStringProfile) { + *out = *in + if in.ConnectionStrings != nil { + in, out := &in.ConnectionStrings, &out.ConnectionStrings + *out = make([]ConnectionStringSpec, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringProfile. +func (in *ConnectionStringProfile) DeepCopy() *ConnectionStringProfile { + if in == nil { + return nil + } + out := new(ConnectionStringProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionStringSpec) DeepCopyInto(out *ConnectionStringSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringSpec. +func (in *ConnectionStringSpec) DeepCopy() *ConnectionStringSpec { + if in == nil { + return nil + } + out := new(ConnectionStringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DBWalletSecret) DeepCopyInto(out *DBWalletSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBWalletSecret. +func (in *DBWalletSecret) DeepCopy() *DBWalletSecret { + if in == nil { + return nil + } + out := new(DBWalletSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataguardBroker) DeepCopyInto(out *DataguardBroker) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBroker. +func (in *DataguardBroker) DeepCopy() *DataguardBroker { + if in == nil { + return nil + } + out := new(DataguardBroker) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataguardBroker) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataguardBrokerList) DeepCopyInto(out *DataguardBrokerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataguardBroker, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerList. 
+func (in *DataguardBrokerList) DeepCopy() *DataguardBrokerList { + if in == nil { + return nil + } + out := new(DataguardBrokerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataguardBrokerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataguardBrokerSpec) DeepCopyInto(out *DataguardBrokerSpec) { + *out = *in + if in.StandbyDatabaseRefs != nil { + in, out := &in.StandbyDatabaseRefs, &out.StandbyDatabaseRefs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerSpec. +func (in *DataguardBrokerSpec) DeepCopy() *DataguardBrokerSpec { + if in == nil { + return nil + } + out := new(DataguardBrokerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataguardBrokerStatus) DeepCopyInto(out *DataguardBrokerStatus) { + *out = *in + if in.DatabasesInDataguardConfig != nil { + in, out := &in.DatabasesInDataguardConfig, &out.DatabasesInDataguardConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataguardBrokerStatus. 
+func (in *DataguardBrokerStatus) DeepCopy() *DataguardBrokerStatus { + if in == nil { + return nil + } + out := new(DataguardBrokerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbCloneConfig) DeepCopyInto(out *DbCloneConfig) { + *out = *in + if in.SshPublicKeys != nil { + in, out := &in.SshPublicKeys, &out.SshPublicKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbCloneConfig. +func (in *DbCloneConfig) DeepCopy() *DbCloneConfig { + if in == nil { + return nil + } + out := new(DbCloneConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbCloneStatus) DeepCopyInto(out *DbCloneStatus) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + if in.SshPublicKeys != nil { + in, out := &in.SshPublicKeys, &out.SshPublicKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbCloneStatus. +func (in *DbCloneStatus) DeepCopy() *DbCloneStatus { + if in == nil { + return nil + } + out := new(DbCloneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbStatus) DeepCopyInto(out *DbStatus) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbStatus. 
+func (in *DbStatus) DeepCopy() *DbStatus { + if in == nil { + return nil + } + out := new(DbStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbSystemDetails) DeepCopyInto(out *DbSystemDetails) { + *out = *in + if in.SshPublicKeys != nil { + in, out := &in.SshPublicKeys, &out.SshPublicKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FaultDomains != nil { + in, out := &in.FaultDomains, &out.FaultDomains + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodeCount != nil { + in, out := &in.NodeCount, &out.NodeCount + *out = new(int) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.DbBackupConfig.DeepCopyInto(&out.DbBackupConfig) + out.KMSConfig = in.KMSConfig +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbSystemDetails. +func (in *DbSystemDetails) DeepCopy() *DbSystemDetails { + if in == nil { + return nil + } + out := new(DbSystemDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbWorkrequests) DeepCopyInto(out *DbWorkrequests) { + *out = *in + if in.OperationType != nil { + in, out := &in.OperationType, &out.OperationType + *out = new(string) + **out = **in + } + if in.OperationId != nil { + in, out := &in.OperationId, &out.OperationId + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbWorkrequests. 
+func (in *DbWorkrequests) DeepCopy() *DbWorkrequests { + if in == nil { + return nil + } + out := new(DbWorkrequests) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbcsSystem) DeepCopyInto(out *DbcsSystem) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystem. +func (in *DbcsSystem) DeepCopy() *DbcsSystem { + if in == nil { + return nil + } + out := new(DbcsSystem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DbcsSystem) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbcsSystemList) DeepCopyInto(out *DbcsSystemList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DbcsSystem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemList. +func (in *DbcsSystemList) DeepCopy() *DbcsSystemList { + if in == nil { + return nil + } + out := new(DbcsSystemList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DbcsSystemList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DbcsSystemSpec) DeepCopyInto(out *DbcsSystemSpec) { + *out = *in + in.DbSystem.DeepCopyInto(&out.DbSystem) + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + if in.OCIConfigMap != nil { + in, out := &in.OCIConfigMap, &out.OCIConfigMap + *out = new(string) + **out = **in + } + if in.OCISecret != nil { + in, out := &in.OCISecret, &out.OCISecret + *out = new(string) + **out = **in + } + if in.DbClone != nil { + in, out := &in.DbClone, &out.DbClone + *out = new(DbCloneConfig) + (*in).DeepCopyInto(*out) + } + if in.PdbConfigs != nil { + in, out := &in.PdbConfigs, &out.PdbConfigs + *out = make([]PDBConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DbBackupId != nil { + in, out := &in.DbBackupId, &out.DbBackupId + *out = new(string) + **out = **in + } + if in.DatabaseId != nil { + in, out := &in.DatabaseId, &out.DatabaseId + *out = new(string) + **out = **in + } + out.KMSConfig = in.KMSConfig +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemSpec. +func (in *DbcsSystemSpec) DeepCopy() *DbcsSystemSpec { + if in == nil { + return nil + } + out := new(DbcsSystemSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DbcsSystemStatus) DeepCopyInto(out *DbcsSystemStatus) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + if in.DataStoragePercentage != nil { + in, out := &in.DataStoragePercentage, &out.DataStoragePercentage + *out = new(int) + **out = **in + } + if in.DataStorageSizeInGBs != nil { + in, out := &in.DataStorageSizeInGBs, &out.DataStorageSizeInGBs + *out = new(int) + **out = **in + } + if in.RecoStorageSizeInGB != nil { + in, out := &in.RecoStorageSizeInGB, &out.RecoStorageSizeInGB + *out = new(int) + **out = **in + } + if in.Shape != nil { + in, out := &in.Shape, &out.Shape + *out = new(string) + **out = **in + } + if in.DbInfo != nil { + in, out := &in.DbInfo, &out.DbInfo + *out = make([]DbStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Network.DeepCopyInto(&out.Network) + if in.WorkRequests != nil { + in, out := &in.WorkRequests, &out.WorkRequests + *out = make([]DbWorkrequests, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.KMSDetailsStatus = in.KMSDetailsStatus + in.DbCloneStatus.DeepCopyInto(&out.DbCloneStatus) + if in.PdbDetailsStatus != nil { + in, out := &in.PdbDetailsStatus, &out.PdbDetailsStatus + *out = make([]PDBDetailsStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DbcsSystemStatus. +func (in *DbcsSystemStatus) DeepCopy() *DbcsSystemStatus { + if in == nil { + return nil + } + out := new(DbcsSystemStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvironmentVariable) DeepCopyInto(out *EnvironmentVariable) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvironmentVariable. 
+func (in *EnvironmentVariable) DeepCopy() *EnvironmentVariable { + if in == nil { + return nil + } + out := new(EnvironmentVariable) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalSettings) DeepCopyInto(out *GlobalSettings) { + *out = *in + if in.CacheMetadataEnabled != nil { + in, out := &in.CacheMetadataEnabled, &out.CacheMetadataEnabled + *out = new(bool) + **out = **in + } + if in.CacheMetadataGraphQLExpireAfterAccess != nil { + in, out := &in.CacheMetadataGraphQLExpireAfterAccess, &out.CacheMetadataGraphQLExpireAfterAccess + *out = new(timex.Duration) + **out = **in + } + if in.CacheMetadataGraphQLExpireAfterWrite != nil { + in, out := &in.CacheMetadataGraphQLExpireAfterWrite, &out.CacheMetadataGraphQLExpireAfterWrite + *out = new(timex.Duration) + **out = **in + } + if in.CacheMetadataTimeout != nil { + in, out := &in.CacheMetadataTimeout, &out.CacheMetadataTimeout + *out = new(timex.Duration) + **out = **in + } + if in.CacheMetadataJWKSEnabled != nil { + in, out := &in.CacheMetadataJWKSEnabled, &out.CacheMetadataJWKSEnabled + *out = new(bool) + **out = **in + } + if in.CacheMetadataJWKSInitialCapacity != nil { + in, out := &in.CacheMetadataJWKSInitialCapacity, &out.CacheMetadataJWKSInitialCapacity + *out = new(int32) + **out = **in + } + if in.CacheMetadataJWKSMaximumSize != nil { + in, out := &in.CacheMetadataJWKSMaximumSize, &out.CacheMetadataJWKSMaximumSize + *out = new(int32) + **out = **in + } + if in.CacheMetadataJWKSExpireAfterAccess != nil { + in, out := &in.CacheMetadataJWKSExpireAfterAccess, &out.CacheMetadataJWKSExpireAfterAccess + *out = new(timex.Duration) + **out = **in + } + if in.CacheMetadataJWKSExpireAfterWrite != nil { + in, out := &in.CacheMetadataJWKSExpireAfterWrite, &out.CacheMetadataJWKSExpireAfterWrite + *out = new(timex.Duration) + **out = **in + } + if in.DatabaseAPIEnabled != nil { + in, out := 
&in.DatabaseAPIEnabled, &out.DatabaseAPIEnabled + *out = new(bool) + **out = **in + } + if in.DatabaseAPIManagementServicesDisabled != nil { + in, out := &in.DatabaseAPIManagementServicesDisabled, &out.DatabaseAPIManagementServicesDisabled + *out = new(bool) + **out = **in + } + if in.DBInvalidPoolTimeout != nil { + in, out := &in.DBInvalidPoolTimeout, &out.DBInvalidPoolTimeout + *out = new(timex.Duration) + **out = **in + } + if in.FeatureGraphQLMaxNestingDepth != nil { + in, out := &in.FeatureGraphQLMaxNestingDepth, &out.FeatureGraphQLMaxNestingDepth + *out = new(int32) + **out = **in + } + if in.SecurityCredentialsAttempts != nil { + in, out := &in.SecurityCredentialsAttempts, &out.SecurityCredentialsAttempts + *out = new(int32) + **out = **in + } + if in.SecurityCredentialsLockTime != nil { + in, out := &in.SecurityCredentialsLockTime, &out.SecurityCredentialsLockTime + *out = new(timex.Duration) + **out = **in + } + if in.StandaloneHTTPPort != nil { + in, out := &in.StandaloneHTTPPort, &out.StandaloneHTTPPort + *out = new(int32) + **out = **in + } + if in.StandaloneHTTPSPort != nil { + in, out := &in.StandaloneHTTPSPort, &out.StandaloneHTTPSPort + *out = new(int32) + **out = **in + } + if in.StandaloneStopTimeout != nil { + in, out := &in.StandaloneStopTimeout, &out.StandaloneStopTimeout + *out = new(timex.Duration) + **out = **in + } + if in.DebugPrintDebugToScreen != nil { + in, out := &in.DebugPrintDebugToScreen, &out.DebugPrintDebugToScreen + *out = new(bool) + **out = **in + } + if in.ICAPPort != nil { + in, out := &in.ICAPPort, &out.ICAPPort + *out = new(int32) + **out = **in + } + if in.ICAPSecurePort != nil { + in, out := &in.ICAPSecurePort, &out.ICAPSecurePort + *out = new(int32) + **out = **in + } + if in.MongoPort != nil { + in, out := &in.MongoPort, &out.MongoPort + *out = new(int32) + **out = **in + } + if in.MongoIdleTimeout != nil { + in, out := &in.MongoIdleTimeout, &out.MongoIdleTimeout + *out = new(timex.Duration) + **out = **in + } + if 
in.MongoOpTimeout != nil { + in, out := &in.MongoOpTimeout, &out.MongoOpTimeout + *out = new(timex.Duration) + **out = **in + } + if in.SecurityDisableDefaultExclusionList != nil { + in, out := &in.SecurityDisableDefaultExclusionList, &out.SecurityDisableDefaultExclusionList + *out = new(bool) + **out = **in + } + if in.SecurityMaxEntries != nil { + in, out := &in.SecurityMaxEntries, &out.SecurityMaxEntries + *out = new(int32) + **out = **in + } + if in.SecurityVerifySSL != nil { + in, out := &in.SecurityVerifySSL, &out.SecurityVerifySSL + *out = new(bool) + **out = **in + } + if in.CertSecret != nil { + in, out := &in.CertSecret, &out.CertSecret + *out = new(CertificateSecret) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalSettings. +func (in *GlobalSettings) DeepCopy() *GlobalSettings { + if in == nil { + return nil + } + out := new(GlobalSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmServiceSpec) DeepCopyInto(out *GsmServiceSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmServiceSpec. +func (in *GsmServiceSpec) DeepCopy() *GsmServiceSpec { + if in == nil { + return nil + } + out := new(GsmServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmShardDetails) DeepCopyInto(out *GsmShardDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmShardDetails. 
+func (in *GsmShardDetails) DeepCopy() *GsmShardDetails { + if in == nil { + return nil + } + out := new(GsmShardDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmShardGroupSpec) DeepCopyInto(out *GsmShardGroupSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmShardGroupSpec. +func (in *GsmShardGroupSpec) DeepCopy() *GsmShardGroupSpec { + if in == nil { + return nil + } + out := new(GsmShardGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmShardSpaceSpec) DeepCopyInto(out *GsmShardSpaceSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmShardSpaceSpec. +func (in *GsmShardSpaceSpec) DeepCopy() *GsmShardSpaceSpec { + if in == nil { + return nil + } + out := new(GsmShardSpaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GsmSpec) DeepCopyInto(out *GsmSpec) { + *out = *in + if in.EnvVars != nil { + in, out := &in.EnvVars, &out.EnvVars + *out = make([]EnvironmentVariable, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvAnnotations != nil { + in, out := &in.PvAnnotations, &out.PvAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvMatchLabels != nil { + in, out := &in.PvMatchLabels, &out.PvMatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ImagePulllPolicy != nil { + in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy + *out = new(corev1.PullPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmSpec. +func (in *GsmSpec) DeepCopy() *GsmSpec { + if in == nil { + return nil + } + out := new(GsmSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmStatus) DeepCopyInto(out *GsmStatus) { + *out = *in + if in.Shards != nil { + in, out := &in.Shards, &out.Shards + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmStatus. 
+func (in *GsmStatus) DeepCopy() *GsmStatus { + if in == nil { + return nil + } + out := new(GsmStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GsmStatusDetails) DeepCopyInto(out *GsmStatusDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GsmStatusDetails. +func (in *GsmStatusDetails) DeepCopy() *GsmStatusDetails { + if in == nil { + return nil + } + out := new(GsmStatusDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sADBBackupSpec) DeepCopyInto(out *K8sADBBackupSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sADBBackupSpec. +func (in *K8sADBBackupSpec) DeepCopy() *K8sADBBackupSpec { + if in == nil { + return nil + } + out := new(K8sADBBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sAcdSpec) DeepCopyInto(out *K8sAcdSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAcdSpec. +func (in *K8sAcdSpec) DeepCopy() *K8sAcdSpec { + if in == nil { + return nil + } + out := new(K8sAcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *K8sAdbSpec) DeepCopyInto(out *K8sAdbSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sAdbSpec. +func (in *K8sAdbSpec) DeepCopy() *K8sAdbSpec { + if in == nil { + return nil + } + out := new(K8sAdbSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *K8sSecretSpec) DeepCopyInto(out *K8sSecretSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sSecretSpec. +func (in *K8sSecretSpec) DeepCopy() *K8sSecretSpec { + if in == nil { + return nil + } + out := new(K8sSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfig) DeepCopyInto(out *KMSConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfig. +func (in *KMSConfig) DeepCopy() *KMSConfig { + if in == nil { + return nil + } + out := new(KMSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSDetailsStatus) DeepCopyInto(out *KMSDetailsStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSDetailsStatus. +func (in *KMSDetailsStatus) DeepCopy() *KMSDetailsStatus { + if in == nil { + return nil + } + out := new(KMSDetailsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LREST) DeepCopyInto(out *LREST) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LREST. +func (in *LREST) DeepCopy() *LREST { + if in == nil { + return nil + } + out := new(LREST) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LREST) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTAdminPassword) DeepCopyInto(out *LRESTAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTAdminPassword. +func (in *LRESTAdminPassword) DeepCopy() *LRESTAdminPassword { + if in == nil { + return nil + } + out := new(LRESTAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTAdminUser) DeepCopyInto(out *LRESTAdminUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTAdminUser. +func (in *LRESTAdminUser) DeepCopy() *LRESTAdminUser { + if in == nil { + return nil + } + out := new(LRESTAdminUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRESTList) DeepCopyInto(out *LRESTList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LREST, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTList. +func (in *LRESTList) DeepCopy() *LRESTList { + if in == nil { + return nil + } + out := new(LRESTList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LRESTList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTPRVKEY) DeepCopyInto(out *LRESTPRVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTPRVKEY. +func (in *LRESTPRVKEY) DeepCopy() *LRESTPRVKEY { + if in == nil { + return nil + } + out := new(LRESTPRVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTPUBKEY) DeepCopyInto(out *LRESTPUBKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTPUBKEY. +func (in *LRESTPUBKEY) DeepCopy() *LRESTPUBKEY { + if in == nil { + return nil + } + out := new(LRESTPUBKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRESTPassword) DeepCopyInto(out *LRESTPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTPassword. +func (in *LRESTPassword) DeepCopy() *LRESTPassword { + if in == nil { + return nil + } + out := new(LRESTPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTSecret) DeepCopyInto(out *LRESTSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTSecret. +func (in *LRESTSecret) DeepCopy() *LRESTSecret { + if in == nil { + return nil + } + out := new(LRESTSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTSpec) DeepCopyInto(out *LRESTSpec) { + *out = *in + out.SysAdminPwd = in.SysAdminPwd + out.LRESTAdminUser = in.LRESTAdminUser + out.LRESTAdminPwd = in.LRESTAdminPwd + out.LRESTTlsKey = in.LRESTTlsKey + out.LRESTTlsCrt = in.LRESTTlsCrt + out.LRESTPubKey = in.LRESTPubKey + out.LRESTPriKey = in.LRESTPriKey + out.LRESTPwd = in.LRESTPwd + out.WebLrestServerUser = in.WebLrestServerUser + out.WebLrestServerPwd = in.WebLrestServerPwd + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTSpec. +func (in *LRESTSpec) DeepCopy() *LRESTSpec { + if in == nil { + return nil + } + out := new(LRESTSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRESTStatus) DeepCopyInto(out *LRESTStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTStatus. +func (in *LRESTStatus) DeepCopy() *LRESTStatus { + if in == nil { + return nil + } + out := new(LRESTStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTSysAdminPassword) DeepCopyInto(out *LRESTSysAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTSysAdminPassword. +func (in *LRESTSysAdminPassword) DeepCopy() *LRESTSysAdminPassword { + if in == nil { + return nil + } + out := new(LRESTSysAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTTLSCRT) DeepCopyInto(out *LRESTTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTTLSCRT. +func (in *LRESTTLSCRT) DeepCopy() *LRESTTLSCRT { + if in == nil { + return nil + } + out := new(LRESTTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRESTTLSKEY) DeepCopyInto(out *LRESTTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRESTTLSKEY. +func (in *LRESTTLSKEY) DeepCopy() *LRESTTLSKEY { + if in == nil { + return nil + } + out := new(LRESTTLSKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRPDB) DeepCopyInto(out *LRPDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDB. +func (in *LRPDB) DeepCopy() *LRPDB { + if in == nil { + return nil + } + out := new(LRPDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LRPDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBAdminName) DeepCopyInto(out *LRPDBAdminName) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBAdminName. +func (in *LRPDBAdminName) DeepCopy() *LRPDBAdminName { + if in == nil { + return nil + } + out := new(LRPDBAdminName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBAdminPassword) DeepCopyInto(out *LRPDBAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBAdminPassword. +func (in *LRPDBAdminPassword) DeepCopy() *LRPDBAdminPassword { + if in == nil { + return nil + } + out := new(LRPDBAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRPDBList) DeepCopyInto(out *LRPDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LRPDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBList. +func (in *LRPDBList) DeepCopy() *LRPDBList { + if in == nil { + return nil + } + out := new(LRPDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LRPDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBPRVKEY) DeepCopyInto(out *LRPDBPRVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBPRVKEY. +func (in *LRPDBPRVKEY) DeepCopy() *LRPDBPRVKEY { + if in == nil { + return nil + } + out := new(LRPDBPRVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBSecret) DeepCopyInto(out *LRPDBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBSecret. +func (in *LRPDBSecret) DeepCopy() *LRPDBSecret { + if in == nil { + return nil + } + out := new(LRPDBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRPDBSpec) DeepCopyInto(out *LRPDBSpec) { + *out = *in + out.LRPDBTlsKey = in.LRPDBTlsKey + out.LRPDBTlsCrt = in.LRPDBTlsCrt + out.LRPDBTlsCat = in.LRPDBTlsCat + out.LRPDBPriKey = in.LRPDBPriKey + out.AdminName = in.AdminName + out.AdminPwd = in.AdminPwd + out.AdminpdbUser = in.AdminpdbUser + out.AdminpdbPass = in.AdminpdbPass + if in.ReuseTempFile != nil { + in, out := &in.ReuseTempFile, &out.ReuseTempFile + *out = new(bool) + **out = **in + } + if in.UnlimitedStorage != nil { + in, out := &in.UnlimitedStorage, &out.UnlimitedStorage + *out = new(bool) + **out = **in + } + if in.AsClone != nil { + in, out := &in.AsClone, &out.AsClone + *out = new(bool) + **out = **in + } + out.WebLrpdbServerUser = in.WebLrpdbServerUser + out.WebLrpdbServerPwd = in.WebLrpdbServerPwd + if in.LTDEImport != nil { + in, out := &in.LTDEImport, &out.LTDEImport + *out = new(bool) + **out = **in + } + if in.LTDEExport != nil { + in, out := &in.LTDEExport, &out.LTDEExport + *out = new(bool) + **out = **in + } + out.LTDEPassword = in.LTDEPassword + out.LTDESecret = in.LTDESecret + if in.GetScript != nil { + in, out := &in.GetScript, &out.GetScript + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBSpec. +func (in *LRPDBSpec) DeepCopy() *LRPDBSpec { + if in == nil { + return nil + } + out := new(LRPDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBStatus) DeepCopyInto(out *LRPDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBStatus. +func (in *LRPDBStatus) DeepCopy() *LRPDBStatus { + if in == nil { + return nil + } + out := new(LRPDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LRPDBTLSCAT) DeepCopyInto(out *LRPDBTLSCAT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBTLSCAT. +func (in *LRPDBTLSCAT) DeepCopy() *LRPDBTLSCAT { + if in == nil { + return nil + } + out := new(LRPDBTLSCAT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBTLSCRT) DeepCopyInto(out *LRPDBTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBTLSCRT. +func (in *LRPDBTLSCRT) DeepCopy() *LRPDBTLSCRT { + if in == nil { + return nil + } + out := new(LRPDBTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LRPDBTLSKEY) DeepCopyInto(out *LRPDBTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LRPDBTLSKEY. +func (in *LRPDBTLSKEY) DeepCopy() *LRPDBTLSKEY { + if in == nil { + return nil + } + out := new(LRPDBTLSKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LTDEPwd) DeepCopyInto(out *LTDEPwd) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LTDEPwd. +func (in *LTDEPwd) DeepCopy() *LTDEPwd { + if in == nil { + return nil + } + out := new(LTDEPwd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LTDESecret) DeepCopyInto(out *LTDESecret) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LTDESecret. +func (in *LTDESecret) DeepCopy() *LTDESecret { + if in == nil { + return nil + } + out := new(LTDESecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ORDSPassword) DeepCopyInto(out *ORDSPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ORDSPassword. +func (in *ORDSPassword) DeepCopy() *ORDSPassword { + if in == nil { + return nil + } + out := new(ORDSPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciAcdSpec) DeepCopyInto(out *OciAcdSpec) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciAcdSpec. +func (in *OciAcdSpec) DeepCopy() *OciAcdSpec { + if in == nil { + return nil + } + out := new(OciAcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciAdbSpec) DeepCopyInto(out *OciAdbSpec) { + *out = *in + if in.OCID != nil { + in, out := &in.OCID, &out.OCID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciAdbSpec. +func (in *OciAdbSpec) DeepCopy() *OciAdbSpec { + if in == nil { + return nil + } + out := new(OciAdbSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OciConfigSpec) DeepCopyInto(out *OciConfigSpec) { + *out = *in + if in.ConfigMapName != nil { + in, out := &in.ConfigMapName, &out.ConfigMapName + *out = new(string) + **out = **in + } + if in.SecretName != nil { + in, out := &in.SecretName, &out.SecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciConfigSpec. +func (in *OciConfigSpec) DeepCopy() *OciConfigSpec { + if in == nil { + return nil + } + out := new(OciConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OciSecretSpec) DeepCopyInto(out *OciSecretSpec) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OciSecretSpec. +func (in *OciSecretSpec) DeepCopy() *OciSecretSpec { + if in == nil { + return nil + } + out := new(OciSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataService) DeepCopyInto(out *OracleRestDataService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataService. +func (in *OracleRestDataService) DeepCopy() *OracleRestDataService { + if in == nil { + return nil + } + out := new(OracleRestDataService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OracleRestDataService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceImage) DeepCopyInto(out *OracleRestDataServiceImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceImage. +func (in *OracleRestDataServiceImage) DeepCopy() *OracleRestDataServiceImage { + if in == nil { + return nil + } + out := new(OracleRestDataServiceImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceList) DeepCopyInto(out *OracleRestDataServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OracleRestDataService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceList. +func (in *OracleRestDataServiceList) DeepCopy() *OracleRestDataServiceList { + if in == nil { + return nil + } + out := new(OracleRestDataServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OracleRestDataServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OracleRestDataServicePassword) DeepCopyInto(out *OracleRestDataServicePassword) { + *out = *in + if in.KeepSecret != nil { + in, out := &in.KeepSecret, &out.KeepSecret + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServicePassword. +func (in *OracleRestDataServicePassword) DeepCopy() *OracleRestDataServicePassword { + if in == nil { + return nil + } + out := new(OracleRestDataServicePassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServicePersistence) DeepCopyInto(out *OracleRestDataServicePersistence) { + *out = *in + if in.SetWritePermissions != nil { + in, out := &in.SetWritePermissions, &out.SetWritePermissions + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServicePersistence. +func (in *OracleRestDataServicePersistence) DeepCopy() *OracleRestDataServicePersistence { + if in == nil { + return nil + } + out := new(OracleRestDataServicePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceRestEnableSchemas) DeepCopyInto(out *OracleRestDataServiceRestEnableSchemas) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceRestEnableSchemas. +func (in *OracleRestDataServiceRestEnableSchemas) DeepCopy() *OracleRestDataServiceRestEnableSchemas { + if in == nil { + return nil + } + out := new(OracleRestDataServiceRestEnableSchemas) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OracleRestDataServiceSpec) DeepCopyInto(out *OracleRestDataServiceSpec) { + *out = *in + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Image = in.Image + in.OrdsPassword.DeepCopyInto(&out.OrdsPassword) + in.AdminPassword.DeepCopyInto(&out.AdminPassword) + if in.RestEnableSchemas != nil { + in, out := &in.RestEnableSchemas, &out.RestEnableSchemas + *out = make([]OracleRestDataServiceRestEnableSchemas, len(*in)) + copy(*out, *in) + } + in.Persistence.DeepCopyInto(&out.Persistence) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceSpec. +func (in *OracleRestDataServiceSpec) DeepCopy() *OracleRestDataServiceSpec { + if in == nil { + return nil + } + out := new(OracleRestDataServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OracleRestDataServiceStatus) DeepCopyInto(out *OracleRestDataServiceStatus) { + *out = *in + out.Image = in.Image +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OracleRestDataServiceStatus. +func (in *OracleRestDataServiceStatus) DeepCopy() *OracleRestDataServiceStatus { + if in == nil { + return nil + } + out := new(OracleRestDataServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrdsSrvs) DeepCopyInto(out *OrdsSrvs) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdsSrvs. +func (in *OrdsSrvs) DeepCopy() *OrdsSrvs { + if in == nil { + return nil + } + out := new(OrdsSrvs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrdsSrvs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdsSrvsList) DeepCopyInto(out *OrdsSrvsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OrdsSrvs, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdsSrvsList. +func (in *OrdsSrvsList) DeepCopy() *OrdsSrvsList { + if in == nil { + return nil + } + out := new(OrdsSrvsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OrdsSrvsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OrdsSrvsSpec) DeepCopyInto(out *OrdsSrvsSpec) { + *out = *in + in.GlobalSettings.DeepCopyInto(&out.GlobalSettings) + out.EncPrivKey = in.EncPrivKey + if in.PoolSettings != nil { + in, out := &in.PoolSettings, &out.PoolSettings + *out = make([]*PoolSettings, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PoolSettings) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdsSrvsSpec. +func (in *OrdsSrvsSpec) DeepCopy() *OrdsSrvsSpec { + if in == nil { + return nil + } + out := new(OrdsSrvsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrdsSrvsStatus) DeepCopyInto(out *OrdsSrvsStatus) { + *out = *in + if in.HTTPPort != nil { + in, out := &in.HTTPPort, &out.HTTPPort + *out = new(int32) + **out = **in + } + if in.HTTPSPort != nil { + in, out := &in.HTTPSPort, &out.HTTPSPort + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrdsSrvsStatus. +func (in *OrdsSrvsStatus) DeepCopy() *OrdsSrvsStatus { + if in == nil { + return nil + } + out := new(OrdsSrvsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDB) DeepCopyInto(out *PDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDB. 
+func (in *PDB) DeepCopy() *PDB { + if in == nil { + return nil + } + out := new(PDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBAdminName) DeepCopyInto(out *PDBAdminName) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminName. +func (in *PDBAdminName) DeepCopy() *PDBAdminName { + if in == nil { + return nil + } + out := new(PDBAdminName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBAdminPassword) DeepCopyInto(out *PDBAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminPassword. +func (in *PDBAdminPassword) DeepCopy() *PDBAdminPassword { + if in == nil { + return nil + } + out := new(PDBAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDBConfig) DeepCopyInto(out *PDBConfig) { + *out = *in + if in.PdbName != nil { + in, out := &in.PdbName, &out.PdbName + *out = new(string) + **out = **in + } + if in.PdbAdminPassword != nil { + in, out := &in.PdbAdminPassword, &out.PdbAdminPassword + *out = new(string) + **out = **in + } + if in.TdeWalletPassword != nil { + in, out := &in.TdeWalletPassword, &out.TdeWalletPassword + *out = new(string) + **out = **in + } + if in.ShouldPdbAdminAccountBeLocked != nil { + in, out := &in.ShouldPdbAdminAccountBeLocked, &out.ShouldPdbAdminAccountBeLocked + *out = new(bool) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IsDelete != nil { + in, out := &in.IsDelete, &out.IsDelete + *out = new(bool) + **out = **in + } + if in.PluggableDatabaseId != nil { + in, out := &in.PluggableDatabaseId, &out.PluggableDatabaseId + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBConfig. +func (in *PDBConfig) DeepCopy() *PDBConfig { + if in == nil { + return nil + } + out := new(PDBConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDBConfigStatus) DeepCopyInto(out *PDBConfigStatus) { + *out = *in + if in.PdbName != nil { + in, out := &in.PdbName, &out.PdbName + *out = new(string) + **out = **in + } + if in.ShouldPdbAdminAccountBeLocked != nil { + in, out := &in.ShouldPdbAdminAccountBeLocked, &out.ShouldPdbAdminAccountBeLocked + *out = new(bool) + **out = **in + } + if in.FreeformTags != nil { + in, out := &in.FreeformTags, &out.FreeformTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PluggableDatabaseId != nil { + in, out := &in.PluggableDatabaseId, &out.PluggableDatabaseId + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBConfigStatus. +func (in *PDBConfigStatus) DeepCopy() *PDBConfigStatus { + if in == nil { + return nil + } + out := new(PDBConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBDetailsStatus) DeepCopyInto(out *PDBDetailsStatus) { + *out = *in + if in.PDBConfigStatus != nil { + in, out := &in.PDBConfigStatus, &out.PDBConfigStatus + *out = make([]PDBConfigStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBDetailsStatus. +func (in *PDBDetailsStatus) DeepCopy() *PDBDetailsStatus { + if in == nil { + return nil + } + out := new(PDBDetailsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDBList) DeepCopyInto(out *PDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBList. +func (in *PDBList) DeepCopy() *PDBList { + if in == nil { + return nil + } + out := new(PDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBPRIVKEY) DeepCopyInto(out *PDBPRIVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBPRIVKEY. +func (in *PDBPRIVKEY) DeepCopy() *PDBPRIVKEY { + if in == nil { + return nil + } + out := new(PDBPRIVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBPUBKEY) DeepCopyInto(out *PDBPUBKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBPUBKEY. +func (in *PDBPUBKEY) DeepCopy() *PDBPUBKEY { + if in == nil { + return nil + } + out := new(PDBPUBKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBSecret) DeepCopyInto(out *PDBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSecret. 
+func (in *PDBSecret) DeepCopy() *PDBSecret { + if in == nil { + return nil + } + out := new(PDBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBSpec) DeepCopyInto(out *PDBSpec) { + *out = *in + out.PDBTlsKey = in.PDBTlsKey + out.PDBTlsCrt = in.PDBTlsCrt + out.PDBTlsCat = in.PDBTlsCat + out.AdminName = in.AdminName + out.AdminPwd = in.AdminPwd + out.WebServerUsr = in.WebServerUsr + out.WebServerPwd = in.WebServerPwd + if in.ReuseTempFile != nil { + in, out := &in.ReuseTempFile, &out.ReuseTempFile + *out = new(bool) + **out = **in + } + if in.UnlimitedStorage != nil { + in, out := &in.UnlimitedStorage, &out.UnlimitedStorage + *out = new(bool) + **out = **in + } + if in.AsClone != nil { + in, out := &in.AsClone, &out.AsClone + *out = new(bool) + **out = **in + } + if in.TDEImport != nil { + in, out := &in.TDEImport, &out.TDEImport + *out = new(bool) + **out = **in + } + if in.TDEExport != nil { + in, out := &in.TDEExport, &out.TDEExport + *out = new(bool) + **out = **in + } + out.TDEPassword = in.TDEPassword + out.TDESecret = in.TDESecret + if in.GetScript != nil { + in, out := &in.GetScript, &out.GetScript + *out = new(bool) + **out = **in + } + out.PDBPubKey = in.PDBPubKey + out.PDBPriKey = in.PDBPriKey +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSpec. +func (in *PDBSpec) DeepCopy() *PDBSpec { + if in == nil { + return nil + } + out := new(PDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBStatus) DeepCopyInto(out *PDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBStatus. 
+func (in *PDBStatus) DeepCopy() *PDBStatus { + if in == nil { + return nil + } + out := new(PDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBTLSCAT) DeepCopyInto(out *PDBTLSCAT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCAT. +func (in *PDBTLSCAT) DeepCopy() *PDBTLSCAT { + if in == nil { + return nil + } + out := new(PDBTLSCAT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBTLSCRT) DeepCopyInto(out *PDBTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCRT. +func (in *PDBTLSCRT) DeepCopy() *PDBTLSCRT { + if in == nil { + return nil + } + out := new(PDBTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBTLSKEY) DeepCopyInto(out *PDBTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSKEY. +func (in *PDBTLSKEY) DeepCopy() *PDBTLSKEY { + if in == nil { + return nil + } + out := new(PDBTLSKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PITSpec) DeepCopyInto(out *PITSpec) { + *out = *in + if in.Timestamp != nil { + in, out := &in.Timestamp, &out.Timestamp + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PITSpec. 
+func (in *PITSpec) DeepCopy() *PITSpec { + if in == nil { + return nil + } + out := new(PITSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PasswordSecret) DeepCopyInto(out *PasswordSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordSecret. +func (in *PasswordSecret) DeepCopy() *PasswordSecret { + if in == nil { + return nil + } + out := new(PasswordSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PasswordSpec) DeepCopyInto(out *PasswordSpec) { + *out = *in + in.K8sSecret.DeepCopyInto(&out.K8sSecret) + in.OciSecret.DeepCopyInto(&out.OciSecret) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordSpec. +func (in *PasswordSpec) DeepCopy() *PasswordSpec { + if in == nil { + return nil + } + out := new(PasswordSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PoolSettings) DeepCopyInto(out *PoolSettings) { + *out = *in + out.DBSecret = in.DBSecret + out.DBAdminUserSecret = in.DBAdminUserSecret + out.DBCDBAdminUserSecret = in.DBCDBAdminUserSecret + if in.DBPoolDestroyTimeout != nil { + in, out := &in.DBPoolDestroyTimeout, &out.DBPoolDestroyTimeout + *out = new(timex.Duration) + **out = **in + } + if in.DebugTrackResources != nil { + in, out := &in.DebugTrackResources, &out.DebugTrackResources + *out = new(bool) + **out = **in + } + if in.FeatureOpenservicebrokerExclude != nil { + in, out := &in.FeatureOpenservicebrokerExclude, &out.FeatureOpenservicebrokerExclude + *out = new(bool) + **out = **in + } + if in.FeatureSDW != nil { + in, out := &in.FeatureSDW, &out.FeatureSDW + *out = new(bool) + **out = **in + } + if in.OwaTraceSql != nil { + in, out := &in.OwaTraceSql, &out.OwaTraceSql + *out = new(bool) + **out = **in + } + if in.SecurityJWTProfileEnabled != nil { + in, out := &in.SecurityJWTProfileEnabled, &out.SecurityJWTProfileEnabled + *out = new(bool) + **out = **in + } + if in.SecurityJWKSSize != nil { + in, out := &in.SecurityJWKSSize, &out.SecurityJWKSSize + *out = new(int32) + **out = **in + } + if in.SecurityJWKSConnectionTimeout != nil { + in, out := &in.SecurityJWKSConnectionTimeout, &out.SecurityJWKSConnectionTimeout + *out = new(timex.Duration) + **out = **in + } + if in.SecurityJWKSReadTimeout != nil { + in, out := &in.SecurityJWKSReadTimeout, &out.SecurityJWKSReadTimeout + *out = new(timex.Duration) + **out = **in + } + if in.SecurityJWKSRefreshInterval != nil { + in, out := &in.SecurityJWKSRefreshInterval, &out.SecurityJWKSRefreshInterval + *out = new(timex.Duration) + **out = **in + } + if in.SecurityJWTAllowedSkew != nil { + in, out := &in.SecurityJWTAllowedSkew, &out.SecurityJWTAllowedSkew + *out = new(timex.Duration) + **out = **in + } + if in.SecurityJWTAllowedAge != nil { + in, out := &in.SecurityJWTAllowedAge, &out.SecurityJWTAllowedAge + *out = new(timex.Duration) + **out = **in + } + 
if in.DBPort != nil { + in, out := &in.DBPort, &out.DBPort + *out = new(int32) + **out = **in + } + if in.JDBCInactivityTimeout != nil { + in, out := &in.JDBCInactivityTimeout, &out.JDBCInactivityTimeout + *out = new(int32) + **out = **in + } + if in.JDBCInitialLimit != nil { + in, out := &in.JDBCInitialLimit, &out.JDBCInitialLimit + *out = new(int32) + **out = **in + } + if in.JDBCMaxConnectionReuseCount != nil { + in, out := &in.JDBCMaxConnectionReuseCount, &out.JDBCMaxConnectionReuseCount + *out = new(int32) + **out = **in + } + if in.JDBCMaxConnectionReuseTime != nil { + in, out := &in.JDBCMaxConnectionReuseTime, &out.JDBCMaxConnectionReuseTime + *out = new(int32) + **out = **in + } + if in.JDBCSecondsToTrustIdleConnection != nil { + in, out := &in.JDBCSecondsToTrustIdleConnection, &out.JDBCSecondsToTrustIdleConnection + *out = new(int32) + **out = **in + } + if in.JDBCMaxLimit != nil { + in, out := &in.JDBCMaxLimit, &out.JDBCMaxLimit + *out = new(int32) + **out = **in + } + if in.JDBCAuthEnabled != nil { + in, out := &in.JDBCAuthEnabled, &out.JDBCAuthEnabled + *out = new(bool) + **out = **in + } + if in.JDBCMaxStatementsLimit != nil { + in, out := &in.JDBCMaxStatementsLimit, &out.JDBCMaxStatementsLimit + *out = new(int32) + **out = **in + } + if in.JDBCMinLimit != nil { + in, out := &in.JDBCMinLimit, &out.JDBCMinLimit + *out = new(int32) + **out = **in + } + if in.JDBCStatementTimeout != nil { + in, out := &in.JDBCStatementTimeout, &out.JDBCStatementTimeout + *out = new(int32) + **out = **in + } + if in.MiscPaginationMaxRows != nil { + in, out := &in.MiscPaginationMaxRows, &out.MiscPaginationMaxRows + *out = new(int32) + **out = **in + } + if in.RestEnabledSqlActive != nil { + in, out := &in.RestEnabledSqlActive, &out.RestEnabledSqlActive + *out = new(bool) + **out = **in + } + if in.DBWalletSecret != nil { + in, out := &in.DBWalletSecret, &out.DBWalletSecret + *out = new(DBWalletSecret) + **out = **in + } + if in.TNSAdminSecret != nil { + in, out := 
&in.TNSAdminSecret, &out.TNSAdminSecret + *out = new(TNSAdminSecret) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolSettings. +func (in *PoolSettings) DeepCopy() *PoolSettings { + if in == nil { + return nil + } + out := new(PoolSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortMapping) DeepCopyInto(out *PortMapping) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping. +func (in *PortMapping) DeepCopy() *PortMapping { + if in == nil { + return nil + } + out := new(PortMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriVKey) DeepCopyInto(out *PriVKey) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriVKey. +func (in *PriVKey) DeepCopy() *PriVKey { + if in == nil { + return nil + } + out := new(PriVKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretDetails) DeepCopyInto(out *SecretDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretDetails. +func (in *SecretDetails) DeepCopy() *SecretDetails { + if in == nil { + return nil + } + out := new(SecretDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShardSpec) DeepCopyInto(out *ShardSpec) { + *out = *in + if in.EnvVars != nil { + in, out := &in.EnvVars, &out.EnvVars + *out = make([]EnvironmentVariable, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvAnnotations != nil { + in, out := &in.PvAnnotations, &out.PvAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PvMatchLabels != nil { + in, out := &in.PvMatchLabels, &out.PvMatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ImagePulllPolicy != nil { + in, out := &in.ImagePulllPolicy, &out.ImagePulllPolicy + *out = new(corev1.PullPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardSpec. +func (in *ShardSpec) DeepCopy() *ShardSpec { + if in == nil { + return nil + } + out := new(ShardSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardingDatabase) DeepCopyInto(out *ShardingDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingDatabase. 
+func (in *ShardingDatabase) DeepCopy() *ShardingDatabase { + if in == nil { + return nil + } + out := new(ShardingDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ShardingDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ShardingDatabaseList) DeepCopyInto(out *ShardingDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ShardingDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingDatabaseList. +func (in *ShardingDatabaseList) DeepCopy() *ShardingDatabaseList { + if in == nil { + return nil + } + out := new(ShardingDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ShardingDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShardingDatabaseSpec) DeepCopyInto(out *ShardingDatabaseSpec) { + *out = *in + if in.Shard != nil { + in, out := &in.Shard, &out.Shard + *out = make([]ShardSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Catalog != nil { + in, out := &in.Catalog, &out.Catalog + *out = make([]CatalogSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Gsm != nil { + in, out := &in.Gsm, &out.Gsm + *out = make([]GsmSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PortMappings != nil { + in, out := &in.PortMappings, &out.PortMappings + *out = make([]PortMapping, len(*in)) + copy(*out, *in) + } + if in.GsmShardSpace != nil { + in, out := &in.GsmShardSpace, &out.GsmShardSpace + *out = make([]GsmShardSpaceSpec, len(*in)) + copy(*out, *in) + } + if in.GsmShardGroup != nil { + in, out := &in.GsmShardGroup, &out.GsmShardGroup + *out = make([]GsmShardGroupSpec, len(*in)) + copy(*out, *in) + } + if in.ShardRegion != nil { + in, out := &in.ShardRegion, &out.ShardRegion + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GsmService != nil { + in, out := &in.GsmService, &out.GsmService + *out = make([]GsmServiceSpec, len(*in)) + copy(*out, *in) + } + if in.DbSecret != nil { + in, out := &in.DbSecret, &out.DbSecret + *out = new(SecretDetails) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingDatabaseSpec. +func (in *ShardingDatabaseSpec) DeepCopy() *ShardingDatabaseSpec { + if in == nil { + return nil + } + out := new(ShardingDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShardingDatabaseStatus) DeepCopyInto(out *ShardingDatabaseStatus) { + *out = *in + if in.Shard != nil { + in, out := &in.Shard, &out.Shard + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Catalog != nil { + in, out := &in.Catalog, &out.Catalog + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Gsm.DeepCopyInto(&out.Gsm) + if in.CrdStatus != nil { + in, out := &in.CrdStatus, &out.CrdStatus + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingDatabaseStatus. +func (in *ShardingDatabaseStatus) DeepCopy() *ShardingDatabaseStatus { + if in == nil { + return nil + } + out := new(ShardingDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabase) DeepCopyInto(out *SingleInstanceDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabase. +func (in *SingleInstanceDatabase) DeepCopy() *SingleInstanceDatabase { + if in == nil { + return nil + } + out := new(SingleInstanceDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SingleInstanceDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingleInstanceDatabaseAdminPassword) DeepCopyInto(out *SingleInstanceDatabaseAdminPassword) { + *out = *in + if in.KeepSecret != nil { + in, out := &in.KeepSecret, &out.KeepSecret + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseAdminPassword. +func (in *SingleInstanceDatabaseAdminPassword) DeepCopy() *SingleInstanceDatabaseAdminPassword { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseImage) DeepCopyInto(out *SingleInstanceDatabaseImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseImage. +func (in *SingleInstanceDatabaseImage) DeepCopy() *SingleInstanceDatabaseImage { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseInitParams) DeepCopyInto(out *SingleInstanceDatabaseInitParams) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseInitParams. +func (in *SingleInstanceDatabaseInitParams) DeepCopy() *SingleInstanceDatabaseInitParams { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseInitParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingleInstanceDatabaseList) DeepCopyInto(out *SingleInstanceDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SingleInstanceDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseList. +func (in *SingleInstanceDatabaseList) DeepCopy() *SingleInstanceDatabaseList { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SingleInstanceDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabasePersistence) DeepCopyInto(out *SingleInstanceDatabasePersistence) { + *out = *in + if in.SetWritePermissions != nil { + in, out := &in.SetWritePermissions, &out.SetWritePermissions + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabasePersistence. +func (in *SingleInstanceDatabasePersistence) DeepCopy() *SingleInstanceDatabasePersistence { + if in == nil { + return nil + } + out := new(SingleInstanceDatabasePersistence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseResource) DeepCopyInto(out *SingleInstanceDatabaseResource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseResource. 
+func (in *SingleInstanceDatabaseResource) DeepCopy() *SingleInstanceDatabaseResource { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SingleInstanceDatabaseResources) DeepCopyInto(out *SingleInstanceDatabaseResources) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = new(SingleInstanceDatabaseResource) + **out = **in + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = new(SingleInstanceDatabaseResource) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseResources. +func (in *SingleInstanceDatabaseResources) DeepCopy() *SingleInstanceDatabaseResources { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingleInstanceDatabaseSpec) DeepCopyInto(out *SingleInstanceDatabaseSpec) { + *out = *in + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.FlashBack != nil { + in, out := &in.FlashBack, &out.FlashBack + *out = new(bool) + **out = **in + } + if in.ArchiveLog != nil { + in, out := &in.ArchiveLog, &out.ArchiveLog + *out = new(bool) + **out = **in + } + if in.ForceLogging != nil { + in, out := &in.ForceLogging, &out.ForceLogging + *out = new(bool) + **out = **in + } + if in.TrueCacheServices != nil { + in, out := &in.TrueCacheServices, &out.TrueCacheServices + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.AdminPassword.DeepCopyInto(&out.AdminPassword) + out.Image = in.Image + in.Persistence.DeepCopyInto(&out.Persistence) + if in.InitParams != nil { + in, out := &in.InitParams, &out.InitParams + *out = new(SingleInstanceDatabaseInitParams) + **out = **in + } + in.Resources.DeepCopyInto(&out.Resources) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseSpec. +func (in *SingleInstanceDatabaseSpec) DeepCopy() *SingleInstanceDatabaseSpec { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SingleInstanceDatabaseStatus) DeepCopyInto(out *SingleInstanceDatabaseStatus) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DgBroker != nil { + in, out := &in.DgBroker, &out.DgBroker + *out = new(string) + **out = **in + } + if in.StandbyDatabases != nil { + in, out := &in.StandbyDatabases, &out.StandbyDatabases + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.InitParams = in.InitParams + in.Persistence.DeepCopyInto(&out.Persistence) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SingleInstanceDatabaseStatus. +func (in *SingleInstanceDatabaseStatus) DeepCopy() *SingleInstanceDatabaseStatus { + if in == nil { + return nil + } + out := new(SingleInstanceDatabaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceSpec) DeepCopyInto(out *SourceSpec) { + *out = *in + in.K8sAdbBackup.DeepCopyInto(&out.K8sAdbBackup) + in.PointInTime.DeepCopyInto(&out.PointInTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSpec. +func (in *SourceSpec) DeepCopy() *SourceSpec { + if in == nil { + return nil + } + out := new(SourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TDEPwd) DeepCopyInto(out *TDEPwd) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDEPwd. 
+func (in *TDEPwd) DeepCopy() *TDEPwd { + if in == nil { + return nil + } + out := new(TDEPwd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TDESecret) DeepCopyInto(out *TDESecret) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDESecret. +func (in *TDESecret) DeepCopy() *TDESecret { + if in == nil { + return nil + } + out := new(TDESecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TNSAdminSecret) DeepCopyInto(out *TNSAdminSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TNSAdminSecret. +func (in *TNSAdminSecret) DeepCopy() *TNSAdminSecret { + if in == nil { + return nil + } + out := new(TNSAdminSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetSpec) DeepCopyInto(out *TargetSpec) { + *out = *in + in.K8sAdb.DeepCopyInto(&out.K8sAdb) + in.OciAdb.DeepCopyInto(&out.OciAdb) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSpec. +func (in *TargetSpec) DeepCopy() *TargetSpec { + if in == nil { + return nil + } + out := new(TargetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VmNetworkDetails) DeepCopyInto(out *VmNetworkDetails) { + *out = *in + if in.VcnName != nil { + in, out := &in.VcnName, &out.VcnName + *out = new(string) + **out = **in + } + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.ScanDnsName != nil { + in, out := &in.ScanDnsName, &out.ScanDnsName + *out = new(string) + **out = **in + } + if in.ListenerPort != nil { + in, out := &in.ListenerPort, &out.ListenerPort + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VmNetworkDetails. +func (in *VmNetworkDetails) DeepCopy() *VmNetworkDetails { + if in == nil { + return nil + } + out := new(VmNetworkDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WalletSpec) DeepCopyInto(out *WalletSpec) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + in.Password.DeepCopyInto(&out.Password) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WalletSpec. +func (in *WalletSpec) DeepCopy() *WalletSpec { + if in == nil { + return nil + } + out := new(WalletSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebLrestServerPassword) DeepCopyInto(out *WebLrestServerPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebLrestServerPassword. 
+func (in *WebLrestServerPassword) DeepCopy() *WebLrestServerPassword { + if in == nil { + return nil + } + out := new(WebLrestServerPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebLrestServerUser) DeepCopyInto(out *WebLrestServerUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebLrestServerUser. +func (in *WebLrestServerUser) DeepCopy() *WebLrestServerUser { + if in == nil { + return nil + } + out := new(WebLrestServerUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebLrpdbServerPassword) DeepCopyInto(out *WebLrpdbServerPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebLrpdbServerPassword. +func (in *WebLrpdbServerPassword) DeepCopy() *WebLrpdbServerPassword { + if in == nil { + return nil + } + out := new(WebLrpdbServerPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebLrpdbServerUser) DeepCopyInto(out *WebLrpdbServerUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebLrpdbServerUser. +func (in *WebLrpdbServerUser) DeepCopy() *WebLrpdbServerUser { + if in == nil { + return nil + } + out := new(WebLrpdbServerUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebServerPassword) DeepCopyInto(out *WebServerPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPassword. +func (in *WebServerPassword) DeepCopy() *WebServerPassword { + if in == nil { + return nil + } + out := new(WebServerPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerPasswordPDB) DeepCopyInto(out *WebServerPasswordPDB) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPasswordPDB. +func (in *WebServerPasswordPDB) DeepCopy() *WebServerPasswordPDB { + if in == nil { + return nil + } + out := new(WebServerPasswordPDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerUser) DeepCopyInto(out *WebServerUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUser. +func (in *WebServerUser) DeepCopy() *WebServerUser { + if in == nil { + return nil + } + out := new(WebServerUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerUserPDB) DeepCopyInto(out *WebServerUserPDB) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUserPDB. 
+func (in *WebServerUserPDB) DeepCopy() *WebServerUserPDB { + if in == nil { + return nil + } + out := new(WebServerUserPDB) + in.DeepCopyInto(out) + return out +} diff --git a/apis/observability/v1/databaseobserver_types.go b/apis/observability/v1/databaseobserver_types.go new file mode 100644 index 00000000..642ff18b --- /dev/null +++ b/apis/observability/v1/databaseobserver_types.go @@ -0,0 +1,195 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1 + +import ( + monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type StatusEnum string + +// DatabaseObserverSpec defines the desired state of DatabaseObserver +type DatabaseObserverSpec struct { + Database DatabaseObserverDatabase `json:"database,omitempty"` + Exporter DatabaseObserverExporterConfig `json:"exporter,omitempty"` + ExporterConfig DatabaseObserverConfigMap `json:"configuration,omitempty"` + Prometheus PrometheusConfig `json:"prometheus,omitempty"` + OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + Replicas int32 `json:"replicas,omitempty"` + Log LogConfig `json:"log,omitempty"` + InheritLabels []string `json:"inheritLabels,omitempty"` + ExporterSidecars []corev1.Container `json:"sidecars,omitempty"` + SideCarVolumes []corev1.Volume `json:"sidecarVolumes,omitempty"` +} + +// LogConfig defines the configuration details relation to the logs of DatabaseObserver +type LogConfig struct { + Path string `json:"path,omitempty"` + Filename string `json:"filename,omitempty"` + Volume LogVolume `json:"volume,omitempty"` +} + +type LogVolume struct { + Name string `json:"name,omitempty"` + PersistentVolumeClaim LogVolumePVClaim `json:"persistentVolumeClaim,omitempty"` +} + +type LogVolumePVClaim struct { + ClaimName string `json:"claimName,omitempty"` +} + +// DatabaseObserverDatabase defines the database details used for DatabaseObserver 
+type DatabaseObserverDatabase struct { + DBUser DBSecret `json:"dbUser,omitempty"` + DBPassword DBSecretWithVault `json:"dbPassword,omitempty"` + DBWallet DBSecret `json:"dbWallet,omitempty"` + DBConnectionString DBSecret `json:"dbConnectionString,omitempty"` +} + +// DatabaseObserverExporterConfig defines the configuration details related to the exporters of DatabaseObserver +type DatabaseObserverExporterConfig struct { + Deployment DatabaseObserverDeployment `json:"deployment,omitempty"` + Service DatabaseObserverService `json:"service,omitempty"` +} + +// DatabaseObserverDeployment defines the exporter deployment component of DatabaseObserver +type DatabaseObserverDeployment struct { + ExporterImage string `json:"image,omitempty"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + ExporterArgs []string `json:"args,omitempty"` + ExporterCommands []string `json:"commands,omitempty"` + ExporterEnvs map[string]string `json:"env,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + DeploymentPodTemplate DeploymentPodTemplate `json:"podTemplate,omitempty"` +} + +// DeploymentPodTemplate defines the labels for the DatabaseObserver pods component of a deployment +type DeploymentPodTemplate struct { + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// DatabaseObserverService defines the exporter service component of DatabaseObserver +type DatabaseObserverService struct { + Ports []corev1.ServicePort `json:"ports,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// PrometheusConfig defines the generated resources for Prometheus +type PrometheusConfig struct { + ServiceMonitor PrometheusServiceMonitor `json:"serviceMonitor,omitempty"` +} + +// PrometheusServiceMonitor defines DatabaseObserver servicemonitor spec +type PrometheusServiceMonitor struct { + Labels map[string]string `json:"labels,omitempty"` + NamespaceSelector 
*monitorv1.NamespaceSelector `json:"namespaceSelector,omitempty"` + Endpoints []monitorv1.Endpoint `json:"endpoints,omitempty"` +} + +// DBSecret defines secrets used in reference +type DBSecret struct { + Key string `json:"key,omitempty"` + SecretName string `json:"secret,omitempty"` +} + +// DBSecretWithVault defines secrets used in reference with vault fields +type DBSecretWithVault struct { + Key string `json:"key,omitempty"` + SecretName string `json:"secret,omitempty"` + VaultOCID string `json:"vaultOCID,omitempty"` + VaultSecretName string `json:"vaultSecretName,omitempty"` +} + +// DatabaseObserverConfigMap defines configMap used for metrics configuration +type DatabaseObserverConfigMap struct { + Configmap ConfigMapDetails `json:"configMap,omitempty"` +} + +// ConfigMapDetails defines the configmap name +type ConfigMapDetails struct { + Key string `json:"key,omitempty"` + Name string `json:"name,omitempty"` +} + +// OCIConfigSpec defines the configmap name and secret name used for connecting to OCI +type OCIConfigSpec struct { + ConfigMapName string `json:"configMapName,omitempty"` + SecretName string `json:"secretName,omitempty"` +} + +// DatabaseObserverStatus defines the observed state of DatabaseObserver +type DatabaseObserverStatus struct { + Conditions []metav1.Condition `json:"conditions"` + Status string `json:"status,omitempty"` + ExporterConfig string `json:"exporterConfig"` + Version string `json:"version"` + Replicas int `json:"replicas,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="dbobserver";"dbobservers" + +// DatabaseObserver is the Schema for the databaseobservers API +// +kubebuilder:printcolumn:JSONPath=".status.exporterConfig",name="ExporterConfig",type=string +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type=string +// +kubebuilder:printcolumn:JSONPath=".status.version",name="Version",type=string +type DatabaseObserver struct { + 
metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DatabaseObserverSpec `json:"spec,omitempty"` + Status DatabaseObserverStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DatabaseObserverList contains a list of DatabaseObserver +type DatabaseObserverList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DatabaseObserver `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DatabaseObserver{}, &DatabaseObserverList{}) +} diff --git a/apis/observability/v1/databaseobserver_webhook.go b/apis/observability/v1/databaseobserver_webhook.go new file mode 100644 index 00000000..286d6ed6 --- /dev/null +++ b/apis/observability/v1/databaseobserver_webhook.go @@ -0,0 +1,185 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** 
either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "strings" +) + +// log is for logging in this package. 
+var databaseobserverlog = logf.Log.WithName("databaseobserver-resource") + +const ( + AllowedExporterImage = "container-registry.oracle.com/database/observability-exporter" + ErrorSpecValidationMissingConnString = "a required field for database connection string secret is missing or does not have a value" + ErrorSpecValidationMissingDBUser = "a required field for database user secret is missing or does not have a value" + ErrorSpecValidationMissingDBVaultField = "a field for the OCI vault has a value but the other required field is missing or does not have a value" + ErrorSpecValidationMissingOCIConfig = "a field(s) for the OCI Config is missing or does not have a value when fields for the OCI vault has values" + ErrorSpecValidationMissingDBPasswordSecret = "a required field for the database password secret is missing or does not have a value" + ErrorSpecExporterImageNotAllowed = "a different exporter image was found, only official database exporter container images are currently supported" +) + +func (r *DatabaseObserver) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-observability-oracle-com-v1-databaseobserver,mutating=true,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,verbs=create;update,versions=v1,name=mdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DatabaseObserver{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DatabaseObserver) Default() { + databaseobserverlog.Info("default", "name", r.Name) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
+//+kubebuilder:webhook:verbs=create;update,path=/validate-observability-oracle-com-v1-databaseobserver,mutating=false,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,versions=v1,name=vdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &DatabaseObserver{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateCreate() (admission.Warnings, error) { + databaseobserverlog.Info("validate create", "name", r.Name) + + var e field.ErrorList + ns := dbcommons.GetWatchNamespaces() + + // Check for namespace/cluster scope access + if _, isDesiredNamespaceWithinScope := ns[r.Namespace]; !isDesiredNamespaceWithinScope && len(ns) > 0 { + e = append(e, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + + // Check required secret for db user has value + if r.Spec.Database.DBUser.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbUser").Child("secret"), r.Spec.Database.DBUser.SecretName, + ErrorSpecValidationMissingDBUser)) + } + + // Check required secret for db connection string has value + if r.Spec.Database.DBConnectionString.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbConnectionString").Child("secret"), r.Spec.Database.DBConnectionString.SecretName, + ErrorSpecValidationMissingConnString)) + } + + // The other vault field must have value if one does + if (r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName == "") || + (r.Spec.Database.DBPassword.VaultSecretName != "" && r.Spec.Database.DBPassword.VaultOCID == "") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword"), r.Spec.Database.DBPassword, + ErrorSpecValidationMissingDBVaultField)) + } + + // if 
vault fields have value, ociConfig must have values + if r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName != "" && + (r.Spec.OCIConfig.SecretName == "" || r.Spec.OCIConfig.ConfigMapName == "") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("ociConfig"), r.Spec.OCIConfig, + ErrorSpecValidationMissingOCIConfig)) + } + + // If all of {DB Password Secret Name and vaultOCID+vaultSecretName} have no value, then error out + if r.Spec.Database.DBPassword.SecretName == "" && + r.Spec.Database.DBPassword.VaultOCID == "" && + r.Spec.Database.DBPassword.VaultSecretName == "" { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword").Child("secret"), r.Spec.Database.DBPassword.SecretName, + ErrorSpecValidationMissingDBPasswordSecret)) + } + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil + +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + databaseobserverlog.Info("validate update", "name", r.Name) + var e field.ErrorList + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), 
r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateDelete() (admission.Warnings, error) { + databaseobserverlog.Info("validate delete", "name", r.Name) + + return nil, nil +} diff --git a/apis/observability/v1/groupversion_info.go b/apis/observability/v1/groupversion_info.go new file mode 100644 index 00000000..3f332c05 --- /dev/null +++ b/apis/observability/v1/groupversion_info.go @@ -0,0 +1,58 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. 
+** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Package v1 contains API Schema definitions for the observability v1 API group +// +kubebuilder:object:generate=true +// +groupName=observability.oracle.com +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "observability.oracle.com", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/observability/v1/zz_generated.deepcopy.go b/apis/observability/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..4924216f --- /dev/null +++ b/apis/observability/v1/zz_generated.deepcopy.go @@ -0,0 +1,481 @@ +//go:build !ignore_autogenerated + +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapDetails) DeepCopyInto(out *ConfigMapDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapDetails. +func (in *ConfigMapDetails) DeepCopy() *ConfigMapDetails { + if in == nil { + return nil + } + out := new(ConfigMapDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecret) DeepCopyInto(out *DBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecret. +func (in *DBSecret) DeepCopy() *DBSecret { + if in == nil { + return nil + } + out := new(DBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecretWithVault) DeepCopyInto(out *DBSecretWithVault) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecretWithVault. +func (in *DBSecretWithVault) DeepCopy() *DBSecretWithVault { + if in == nil { + return nil + } + out := new(DBSecretWithVault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserver) DeepCopyInto(out *DatabaseObserver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserver. +func (in *DatabaseObserver) DeepCopy() *DatabaseObserver { + if in == nil { + return nil + } + out := new(DatabaseObserver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverConfigMap) DeepCopyInto(out *DatabaseObserverConfigMap) { + *out = *in + out.Configmap = in.Configmap +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverConfigMap. +func (in *DatabaseObserverConfigMap) DeepCopy() *DatabaseObserverConfigMap { + if in == nil { + return nil + } + out := new(DatabaseObserverConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDatabase) DeepCopyInto(out *DatabaseObserverDatabase) { + *out = *in + out.DBUser = in.DBUser + out.DBPassword = in.DBPassword + out.DBWallet = in.DBWallet + out.DBConnectionString = in.DBConnectionString +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDatabase. 
+func (in *DatabaseObserverDatabase) DeepCopy() *DatabaseObserverDatabase { + if in == nil { + return nil + } + out := new(DatabaseObserverDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDeployment) DeepCopyInto(out *DatabaseObserverDeployment) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ExporterArgs != nil { + in, out := &in.ExporterArgs, &out.ExporterArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterCommands != nil { + in, out := &in.ExporterCommands, &out.ExporterCommands + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterEnvs != nil { + in, out := &in.ExporterEnvs, &out.ExporterEnvs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.DeploymentPodTemplate.DeepCopyInto(&out.DeploymentPodTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDeployment. +func (in *DatabaseObserverDeployment) DeepCopy() *DatabaseObserverDeployment { + if in == nil { + return nil + } + out := new(DatabaseObserverDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverExporterConfig) DeepCopyInto(out *DatabaseObserverExporterConfig) { + *out = *in + in.Deployment.DeepCopyInto(&out.Deployment) + in.Service.DeepCopyInto(&out.Service) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverExporterConfig. +func (in *DatabaseObserverExporterConfig) DeepCopy() *DatabaseObserverExporterConfig { + if in == nil { + return nil + } + out := new(DatabaseObserverExporterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverList) DeepCopyInto(out *DatabaseObserverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DatabaseObserver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverList. +func (in *DatabaseObserverList) DeepCopy() *DatabaseObserverList { + if in == nil { + return nil + } + out := new(DatabaseObserverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverService) DeepCopyInto(out *DatabaseObserverService) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]corev1.ServicePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverService. +func (in *DatabaseObserverService) DeepCopy() *DatabaseObserverService { + if in == nil { + return nil + } + out := new(DatabaseObserverService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverSpec) DeepCopyInto(out *DatabaseObserverSpec) { + *out = *in + out.Database = in.Database + in.Exporter.DeepCopyInto(&out.Exporter) + out.ExporterConfig = in.ExporterConfig + in.Prometheus.DeepCopyInto(&out.Prometheus) + out.OCIConfig = in.OCIConfig + out.Log = in.Log + if in.InheritLabels != nil { + in, out := &in.InheritLabels, &out.InheritLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterSidecars != nil { + in, out := &in.ExporterSidecars, &out.ExporterSidecars + *out = make([]corev1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SideCarVolumes != nil { + in, out := &in.SideCarVolumes, &out.SideCarVolumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverSpec. 
+func (in *DatabaseObserverSpec) DeepCopy() *DatabaseObserverSpec { + if in == nil { + return nil + } + out := new(DatabaseObserverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverStatus) DeepCopyInto(out *DatabaseObserverStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverStatus. +func (in *DatabaseObserverStatus) DeepCopy() *DatabaseObserverStatus { + if in == nil { + return nil + } + out := new(DatabaseObserverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentPodTemplate) DeepCopyInto(out *DeploymentPodTemplate) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentPodTemplate. +func (in *DeploymentPodTemplate) DeepCopy() *DeploymentPodTemplate { + if in == nil { + return nil + } + out := new(DeploymentPodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogConfig) DeepCopyInto(out *LogConfig) { + *out = *in + out.Volume = in.Volume +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfig. +func (in *LogConfig) DeepCopy() *LogConfig { + if in == nil { + return nil + } + out := new(LogConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolume) DeepCopyInto(out *LogVolume) { + *out = *in + out.PersistentVolumeClaim = in.PersistentVolumeClaim +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolume. +func (in *LogVolume) DeepCopy() *LogVolume { + if in == nil { + return nil + } + out := new(LogVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolumePVClaim) DeepCopyInto(out *LogVolumePVClaim) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolumePVClaim. +func (in *LogVolumePVClaim) DeepCopy() *LogVolumePVClaim { + if in == nil { + return nil + } + out := new(LogVolumePVClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIConfigSpec) DeepCopyInto(out *OCIConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIConfigSpec. +func (in *OCIConfigSpec) DeepCopy() *OCIConfigSpec { + if in == nil { + return nil + } + out := new(OCIConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusConfig) DeepCopyInto(out *PrometheusConfig) { + *out = *in + in.ServiceMonitor.DeepCopyInto(&out.ServiceMonitor) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusConfig. +func (in *PrometheusConfig) DeepCopy() *PrometheusConfig { + if in == nil { + return nil + } + out := new(PrometheusConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusServiceMonitor) DeepCopyInto(out *PrometheusServiceMonitor) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(monitoringv1.NamespaceSelector) + (*in).DeepCopyInto(*out) + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]monitoringv1.Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusServiceMonitor. +func (in *PrometheusServiceMonitor) DeepCopy() *PrometheusServiceMonitor { + if in == nil { + return nil + } + out := new(PrometheusServiceMonitor) + in.DeepCopyInto(out) + return out +} diff --git a/apis/observability/v1alpha1/databaseobserver_types.go b/apis/observability/v1alpha1/databaseobserver_types.go index 97827d17..f4c62900 100644 --- a/apis/observability/v1alpha1/databaseobserver_types.go +++ b/apis/observability/v1alpha1/databaseobserver_types.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2022 Oracle and/or its affiliates. +** Copyright (c) 2024 Oracle and/or its affiliates. 
** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -39,6 +39,8 @@ package v1alpha1 import ( + monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -46,11 +48,32 @@ type StatusEnum string // DatabaseObserverSpec defines the desired state of DatabaseObserver type DatabaseObserverSpec struct { - Database DatabaseObserverDatabase `json:"database,omitempty"` - Exporter DatabaseObserverExporterConfig `json:"exporter,omitempty"` - Prometheus PrometheusConfig `json:"prometheus,omitempty"` - OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` - Replicas int32 `json:"replicas,omitempty"` + Database DatabaseObserverDatabase `json:"database,omitempty"` + Exporter DatabaseObserverExporterConfig `json:"exporter,omitempty"` + ExporterConfig DatabaseObserverConfigMap `json:"configuration,omitempty"` + Prometheus PrometheusConfig `json:"prometheus,omitempty"` + OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + Replicas int32 `json:"replicas,omitempty"` + Log LogConfig `json:"log,omitempty"` + InheritLabels []string `json:"inheritLabels,omitempty"` + ExporterSidecars []corev1.Container `json:"sidecars,omitempty"` + SideCarVolumes []corev1.Volume `json:"sidecarVolumes,omitempty"` +} + +// LogConfig defines the configuration details relation to the logs of DatabaseObserver +type LogConfig struct { + Path string `json:"path,omitempty"` + Filename string `json:"filename,omitempty"` + Volume LogVolume `json:"volume,omitempty"` +} + +type LogVolume struct { + Name string `json:"name,omitempty"` + PersistentVolumeClaim LogVolumePVClaim `json:"persistentVolumeClaim,omitempty"` +} + +type LogVolumePVClaim struct { + ClaimName string `json:"claimName,omitempty"` } // DatabaseObserverDatabase defines the database details used for DatabaseObserver @@ -63,27 +86,52 @@ type DatabaseObserverDatabase struct { // DatabaseObserverExporterConfig defines the configuration 
details related to the exporters of DatabaseObserver type DatabaseObserverExporterConfig struct { - ExporterImage string `json:"image,omitempty"` - ExporterConfig DatabaseObserverConfigMap `json:"configuration,omitempty"` - Service DatabaseObserverService `json:"service,omitempty"` + Deployment DatabaseObserverDeployment `json:"deployment,omitempty"` + Service DatabaseObserverService `json:"service,omitempty"` +} + +// DatabaseObserverDeployment defines the exporter deployment component of DatabaseObserver +type DatabaseObserverDeployment struct { + ExporterImage string `json:"image,omitempty"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + ExporterArgs []string `json:"args,omitempty"` + ExporterCommands []string `json:"commands,omitempty"` + ExporterEnvs map[string]string `json:"env,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + DeploymentPodTemplate DeploymentPodTemplate `json:"podTemplate,omitempty"` +} + +// DeploymentPodTemplate defines the labels for the DatabaseObserver pods component of a deployment +type DeploymentPodTemplate struct { + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + Labels map[string]string `json:"labels,omitempty"` } // DatabaseObserverService defines the exporter service component of DatabaseObserver type DatabaseObserverService struct { - Port int32 `json:"port,omitempty"` + Ports []corev1.ServicePort `json:"ports,omitempty"` + Labels map[string]string `json:"labels,omitempty"` } // PrometheusConfig defines the generated resources for Prometheus type PrometheusConfig struct { - Labels map[string]string `json:"labels,omitempty"` - Port string `json:"port,omitempty"` + ServiceMonitor PrometheusServiceMonitor `json:"serviceMonitor,omitempty"` +} + +// PrometheusServiceMonitor defines DatabaseObserver servicemonitor spec +type PrometheusServiceMonitor struct { + Labels map[string]string `json:"labels,omitempty"` + NamespaceSelector 
*monitorv1.NamespaceSelector `json:"namespaceSelector,omitempty"` + Endpoints []monitorv1.Endpoint `json:"endpoints,omitempty"` } +// DBSecret defines secrets used in reference type DBSecret struct { Key string `json:"key,omitempty"` SecretName string `json:"secret,omitempty"` } +// DBSecretWithVault defines secrets used in reference with vault fields type DBSecretWithVault struct { Key string `json:"key,omitempty"` SecretName string `json:"secret,omitempty"` @@ -91,16 +139,18 @@ type DBSecretWithVault struct { VaultSecretName string `json:"vaultSecretName,omitempty"` } +// DatabaseObserverConfigMap defines configMap used for metrics configuration type DatabaseObserverConfigMap struct { - Configmap ConfigMapDetails `json:"configmap,omitempty"` + Configmap ConfigMapDetails `json:"configMap,omitempty"` } // ConfigMapDetails defines the configmap name type ConfigMapDetails struct { Key string `json:"key,omitempty"` - Name string `json:"configmapName,omitempty"` + Name string `json:"name,omitempty"` } +// OCIConfigSpec defines the configmap name and secret name used for connecting to OCI type OCIConfigSpec struct { ConfigMapName string `json:"configMapName,omitempty"` SecretName string `json:"secretName,omitempty"` @@ -108,20 +158,21 @@ type OCIConfigSpec struct { // DatabaseObserverStatus defines the observed state of DatabaseObserver type DatabaseObserverStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file Conditions []metav1.Condition `json:"conditions"` Status string `json:"status,omitempty"` ExporterConfig string `json:"exporterConfig"` + Version string `json:"version"` Replicas int `json:"replicas,omitempty"` } //+kubebuilder:object:root=true //+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="dbobserver";"dbobservers" // DatabaseObserver is the Schema for the databaseobservers API // 
+kubebuilder:printcolumn:JSONPath=".status.exporterConfig",name="ExporterConfig",type=string // +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type=string +// +kubebuilder:printcolumn:JSONPath=".status.version",name="Version",type=string type DatabaseObserver struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/apis/observability/v1alpha1/databaseobserver_webhook.go b/apis/observability/v1alpha1/databaseobserver_webhook.go index 2ab9b732..585ad3bf 100644 --- a/apis/observability/v1alpha1/databaseobserver_webhook.go +++ b/apis/observability/v1alpha1/databaseobserver_webhook.go @@ -145,9 +145,9 @@ func (r *DatabaseObserver) ValidateCreate() (admission.Warnings, error) { } // disallow usage of any other image than the observability-exporter - if r.Spec.Exporter.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.ExporterImage, AllowedExporterImage) { + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { e = append(e, - field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.ExporterImage, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.Deployment.ExporterImage, ErrorSpecExporterImageNotAllowed)) } @@ -165,9 +165,9 @@ func (r *DatabaseObserver) ValidateUpdate(old runtime.Object) (admission.Warning var e field.ErrorList // disallow usage of any other image than the observability-exporter - if r.Spec.Exporter.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.ExporterImage, AllowedExporterImage) { + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { e = append(e, - field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.ExporterImage, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), 
r.Spec.Exporter.Deployment.ExporterImage, ErrorSpecExporterImageNotAllowed)) } // Return if any errors diff --git a/apis/observability/v1alpha1/zz_generated.deepcopy.go b/apis/observability/v1alpha1/zz_generated.deepcopy.go index 39b438eb..4b2a29b0 100644 --- a/apis/observability/v1alpha1/zz_generated.deepcopy.go +++ b/apis/observability/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* ** Copyright (c) 2022 Oracle and/or its affiliates. @@ -44,7 +43,9 @@ package v1alpha1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -155,11 +156,56 @@ func (in *DatabaseObserverDatabase) DeepCopy() *DatabaseObserverDatabase { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverDeployment) DeepCopyInto(out *DatabaseObserverDeployment) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ExporterArgs != nil { + in, out := &in.ExporterArgs, &out.ExporterArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterCommands != nil { + in, out := &in.ExporterCommands, &out.ExporterCommands + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterEnvs != nil { + in, out := &in.ExporterEnvs, &out.ExporterEnvs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.DeploymentPodTemplate.DeepCopyInto(&out.DeploymentPodTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDeployment. +func (in *DatabaseObserverDeployment) DeepCopy() *DatabaseObserverDeployment { + if in == nil { + return nil + } + out := new(DatabaseObserverDeployment) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DatabaseObserverExporterConfig) DeepCopyInto(out *DatabaseObserverExporterConfig) { *out = *in - out.ExporterConfig = in.ExporterConfig - out.Service = in.Service + in.Deployment.DeepCopyInto(&out.Deployment) + in.Service.DeepCopyInto(&out.Service) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverExporterConfig. @@ -207,6 +253,20 @@ func (in *DatabaseObserverList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DatabaseObserverService) DeepCopyInto(out *DatabaseObserverService) { *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1.ServicePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverService. @@ -223,9 +283,30 @@ func (in *DatabaseObserverService) DeepCopy() *DatabaseObserverService { func (in *DatabaseObserverSpec) DeepCopyInto(out *DatabaseObserverSpec) { *out = *in out.Database = in.Database - out.Exporter = in.Exporter + in.Exporter.DeepCopyInto(&out.Exporter) + out.ExporterConfig = in.ExporterConfig in.Prometheus.DeepCopyInto(&out.Prometheus) out.OCIConfig = in.OCIConfig + out.Log = in.Log + if in.InheritLabels != nil { + in, out := &in.InheritLabels, &out.InheritLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterSidecars != nil { + in, out := &in.ExporterSidecars, &out.ExporterSidecars + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SideCarVolumes != nil { + in, out := &in.SideCarVolumes, &out.SideCarVolumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverSpec. 
@@ -243,7 +324,7 @@ func (in *DatabaseObserverStatus) DeepCopyInto(out *DatabaseObserverStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -260,6 +341,80 @@ func (in *DatabaseObserverStatus) DeepCopy() *DatabaseObserverStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentPodTemplate) DeepCopyInto(out *DeploymentPodTemplate) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentPodTemplate. +func (in *DeploymentPodTemplate) DeepCopy() *DeploymentPodTemplate { + if in == nil { + return nil + } + out := new(DeploymentPodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogConfig) DeepCopyInto(out *LogConfig) { + *out = *in + out.Volume = in.Volume +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfig. +func (in *LogConfig) DeepCopy() *LogConfig { + if in == nil { + return nil + } + out := new(LogConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogVolume) DeepCopyInto(out *LogVolume) { + *out = *in + out.PersistentVolumeClaim = in.PersistentVolumeClaim +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolume. +func (in *LogVolume) DeepCopy() *LogVolume { + if in == nil { + return nil + } + out := new(LogVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolumePVClaim) DeepCopyInto(out *LogVolumePVClaim) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolumePVClaim. +func (in *LogVolumePVClaim) DeepCopy() *LogVolumePVClaim { + if in == nil { + return nil + } + out := new(LogVolumePVClaim) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OCIConfigSpec) DeepCopyInto(out *OCIConfigSpec) { *out = *in @@ -277,6 +432,22 @@ func (in *OCIConfigSpec) DeepCopy() *OCIConfigSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PrometheusConfig) DeepCopyInto(out *PrometheusConfig) { + *out = *in + in.ServiceMonitor.DeepCopyInto(&out.ServiceMonitor) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusConfig. +func (in *PrometheusConfig) DeepCopy() *PrometheusConfig { + if in == nil { + return nil + } + out := new(PrometheusConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusServiceMonitor) DeepCopyInto(out *PrometheusServiceMonitor) { *out = *in if in.Labels != nil { in, out := &in.Labels, &out.Labels @@ -285,14 +456,26 @@ func (in *PrometheusConfig) DeepCopyInto(out *PrometheusConfig) { (*out)[key] = val } } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(monitoringv1.NamespaceSelector) + (*in).DeepCopyInto(*out) + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]monitoringv1.Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusConfig. -func (in *PrometheusConfig) DeepCopy() *PrometheusConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusServiceMonitor. +func (in *PrometheusServiceMonitor) DeepCopy() *PrometheusServiceMonitor { if in == nil { return nil } - out := new(PrometheusConfig) + out := new(PrometheusServiceMonitor) in.DeepCopyInto(out) return out } diff --git a/apis/observability/v4/databaseobserver_types.go b/apis/observability/v4/databaseobserver_types.go new file mode 100644 index 00000000..2b9df606 --- /dev/null +++ b/apis/observability/v4/databaseobserver_types.go @@ -0,0 +1,196 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v4 + +import ( + monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type StatusEnum string + +// DatabaseObserverSpec defines the desired state of DatabaseObserver +type DatabaseObserverSpec struct { + Database DatabaseObserverDatabase `json:"database,omitempty"` + Exporter DatabaseObserverExporterConfig `json:"exporter,omitempty"` + ExporterConfig DatabaseObserverConfigMap `json:"configuration,omitempty"` + Prometheus PrometheusConfig `json:"prometheus,omitempty"` + OCIConfig OCIConfigSpec `json:"ociConfig,omitempty"` + Replicas int32 `json:"replicas,omitempty"` + Log LogConfig `json:"log,omitempty"` + InheritLabels []string `json:"inheritLabels,omitempty"` + ExporterSidecars []corev1.Container `json:"sidecars,omitempty"` + SideCarVolumes []corev1.Volume `json:"sidecarVolumes,omitempty"` +} + +// LogConfig defines the configuration details relation to the logs of DatabaseObserver +type LogConfig struct { + Path string `json:"path,omitempty"` + Filename string `json:"filename,omitempty"` + Volume LogVolume `json:"volume,omitempty"` +} + +type LogVolume struct { + Name string `json:"name,omitempty"` + PersistentVolumeClaim LogVolumePVClaim `json:"persistentVolumeClaim,omitempty"` +} + +type LogVolumePVClaim struct { + ClaimName string `json:"claimName,omitempty"` +} + +// DatabaseObserverDatabase defines the database details used for DatabaseObserver +type DatabaseObserverDatabase struct { + DBUser DBSecret `json:"dbUser,omitempty"` + DBPassword DBSecretWithVault `json:"dbPassword,omitempty"` + DBWallet DBSecret `json:"dbWallet,omitempty"` + DBConnectionString DBSecret `json:"dbConnectionString,omitempty"` +} + +// DatabaseObserverExporterConfig defines the configuration details related to the exporters of DatabaseObserver +type DatabaseObserverExporterConfig struct { + Deployment DatabaseObserverDeployment 
`json:"deployment,omitempty"` + Service DatabaseObserverService `json:"service,omitempty"` +} + +// DatabaseObserverDeployment defines the exporter deployment component of DatabaseObserver +type DatabaseObserverDeployment struct { + ExporterImage string `json:"image,omitempty"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` + ExporterArgs []string `json:"args,omitempty"` + ExporterCommands []string `json:"commands,omitempty"` + ExporterEnvs map[string]string `json:"env,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + DeploymentPodTemplate DeploymentPodTemplate `json:"podTemplate,omitempty"` +} + +// DeploymentPodTemplate defines the labels for the DatabaseObserver pods component of a deployment +type DeploymentPodTemplate struct { + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// DatabaseObserverService defines the exporter service component of DatabaseObserver +type DatabaseObserverService struct { + Ports []corev1.ServicePort `json:"ports,omitempty"` + Labels map[string]string `json:"labels,omitempty"` +} + +// PrometheusConfig defines the generated resources for Prometheus +type PrometheusConfig struct { + ServiceMonitor PrometheusServiceMonitor `json:"serviceMonitor,omitempty"` +} + +// PrometheusServiceMonitor defines DatabaseObserver servicemonitor spec +type PrometheusServiceMonitor struct { + Labels map[string]string `json:"labels,omitempty"` + NamespaceSelector *monitorv1.NamespaceSelector `json:"namespaceSelector,omitempty"` + Endpoints []monitorv1.Endpoint `json:"endpoints,omitempty"` +} + +// DBSecret defines secrets used in reference +type DBSecret struct { + Key string `json:"key,omitempty"` + SecretName string `json:"secret,omitempty"` +} + +// DBSecretWithVault defines secrets used in reference with vault fields +type DBSecretWithVault struct { + Key string `json:"key,omitempty"` + SecretName string 
`json:"secret,omitempty"` + VaultOCID string `json:"vaultOCID,omitempty"` + VaultSecretName string `json:"vaultSecretName,omitempty"` +} + +// DatabaseObserverConfigMap defines configMap used for metrics configuration +type DatabaseObserverConfigMap struct { + Configmap ConfigMapDetails `json:"configMap,omitempty"` +} + +// ConfigMapDetails defines the configmap name +type ConfigMapDetails struct { + Key string `json:"key,omitempty"` + Name string `json:"name,omitempty"` +} + +// OCIConfigSpec defines the configmap name and secret name used for connecting to OCI +type OCIConfigSpec struct { + ConfigMapName string `json:"configMapName,omitempty"` + SecretName string `json:"secretName,omitempty"` +} + +// DatabaseObserverStatus defines the observed state of DatabaseObserver +type DatabaseObserverStatus struct { + Conditions []metav1.Condition `json:"conditions"` + Status string `json:"status,omitempty"` + ExporterConfig string `json:"exporterConfig"` + Version string `json:"version"` + Replicas int `json:"replicas,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:shortName="dbobserver";"dbobservers" + +// DatabaseObserver is the Schema for the databaseobservers API +// +kubebuilder:printcolumn:JSONPath=".status.exporterConfig",name="ExporterConfig",type=string +// +kubebuilder:printcolumn:JSONPath=".status.status",name="Status",type=string +// +kubebuilder:printcolumn:JSONPath=".status.version",name="Version",type=string +// +kubebuilder:storageversion +type DatabaseObserver struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DatabaseObserverSpec `json:"spec,omitempty"` + Status DatabaseObserverStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DatabaseObserverList contains a list of DatabaseObserver +type DatabaseObserverList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items 
[]DatabaseObserver `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DatabaseObserver{}, &DatabaseObserverList{}) +} diff --git a/apis/observability/v4/databaseobserver_webhook.go b/apis/observability/v4/databaseobserver_webhook.go new file mode 100644 index 00000000..c0a5d8b7 --- /dev/null +++ b/apis/observability/v4/databaseobserver_webhook.go @@ -0,0 +1,182 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v4 + +import ( + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "strings" +) + +// log is for logging in this package. 
+var databaseobserverlog = logf.Log.WithName("databaseobserver-resource") + +const ( + AllowedExporterImage = "container-registry.oracle.com/database/observability-exporter" + ErrorSpecValidationMissingConnString = "a required field for database connection string secret is missing or does not have a value" + ErrorSpecValidationMissingDBUser = "a required field for database user secret is missing or does not have a value" + ErrorSpecValidationMissingDBVaultField = "a field for the OCI vault has a value but the other required field is missing or does not have a value" + ErrorSpecValidationMissingOCIConfig = "a field(s) for the OCI Config is missing or does not have a value when fields for the OCI vault has values" + ErrorSpecValidationMissingDBPasswordSecret = "a required field for the database password secret is missing or does not have a value" + ErrorSpecExporterImageNotAllowed = "a different exporter image was found, only official database exporter container images are currently supported" +) + +func (r *DatabaseObserver) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-observability-oracle-com-v4-databaseobserver,mutating=true,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,verbs=create;update,versions=v4,name=mdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &DatabaseObserver{} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+ +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *DatabaseObserver) Default() { + databaseobserverlog.Info("default", "name", r.Name) +} + +//+kubebuilder:webhook:verbs=create;update,path=/validate-observability-oracle-com-v4-databaseobserver,mutating=false,sideEffects=none,failurePolicy=fail,groups=observability.oracle.com,resources=databaseobservers,versions=v4,name=vdatabaseobserver.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &DatabaseObserver{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateCreate() (admission.Warnings, error) { + databaseobserverlog.Info("validate create", "name", r.Name) + + var e field.ErrorList + ns := dbcommons.GetWatchNamespaces() + + // Check for namespace/cluster scope access + if _, isDesiredNamespaceWithinScope := ns[r.Namespace]; !isDesiredNamespaceWithinScope && len(ns) > 0 { + e = append(e, + field.Invalid(field.NewPath("metadata").Child("namespace"), r.Namespace, + "Oracle database operator doesn't watch over this namespace")) + } + + // Check required secret for db user has value + if r.Spec.Database.DBUser.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbUser").Child("secret"), r.Spec.Database.DBUser.SecretName, + ErrorSpecValidationMissingDBUser)) + } + + // Check required secret for db connection string has value + if r.Spec.Database.DBConnectionString.SecretName == "" { + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbConnectionString").Child("secret"), r.Spec.Database.DBConnectionString.SecretName, + ErrorSpecValidationMissingConnString)) + } + + // The other vault field must have value if one does + if (r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName == "") || + (r.Spec.Database.DBPassword.VaultSecretName != "" && r.Spec.Database.DBPassword.VaultOCID == 
"") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword"), r.Spec.Database.DBPassword, + ErrorSpecValidationMissingDBVaultField)) + } + + // if vault fields have value, ociConfig must have values + if r.Spec.Database.DBPassword.VaultOCID != "" && r.Spec.Database.DBPassword.VaultSecretName != "" && + (r.Spec.OCIConfig.SecretName == "" || r.Spec.OCIConfig.ConfigMapName == "") { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("ociConfig"), r.Spec.OCIConfig, + ErrorSpecValidationMissingOCIConfig)) + } + + // If all of {DB Password Secret Name and vaultOCID+vaultSecretName} have no value, then error out + if r.Spec.Database.DBPassword.SecretName == "" && + r.Spec.Database.DBPassword.VaultOCID == "" && + r.Spec.Database.DBPassword.VaultSecretName == "" { + + e = append(e, + field.Invalid(field.NewPath("spec").Child("database").Child("dbPassword").Child("secret"), r.Spec.Database.DBPassword.SecretName, + ErrorSpecValidationMissingDBPasswordSecret)) + } + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && !strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil + +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + databaseobserverlog.Info("validate update", "name", r.Name) + var e field.ErrorList + + // disallow usage of any other image than the observability-exporter + if r.Spec.Exporter.Deployment.ExporterImage != "" && 
!strings.HasPrefix(r.Spec.Exporter.Deployment.ExporterImage, AllowedExporterImage) { + e = append(e, + field.Invalid(field.NewPath("spec").Child("exporter").Child("image"), r.Spec.Exporter.Deployment.ExporterImage, + ErrorSpecExporterImageNotAllowed)) + } + // Return if any errors + if len(e) > 0 { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: "observability.oracle.com", Kind: "DatabaseObserver"}, r.Name, e) + } + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *DatabaseObserver) ValidateDelete() (admission.Warnings, error) { + databaseobserverlog.Info("validate delete", "name", r.Name) + + return nil, nil +} diff --git a/apis/observability/v4/groupversion_info.go b/apis/observability/v4/groupversion_info.go new file mode 100644 index 00000000..155b1c11 --- /dev/null +++ b/apis/observability/v4/groupversion_info.go @@ -0,0 +1,58 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for 
sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Package v4 contains API Schema definitions for the observability v4 API group +// +kubebuilder:object:generate=true +// +groupName=observability.oracle.com +package v4 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "observability.oracle.com", Version: "v4"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/observability/v4/zz_generated.deepcopy.go b/apis/observability/v4/zz_generated.deepcopy.go new file mode 100644 index 00000000..d9892643 --- /dev/null +++ b/apis/observability/v4/zz_generated.deepcopy.go @@ -0,0 +1,481 @@ +//go:build !ignore_autogenerated + +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v4 + +import ( + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapDetails) DeepCopyInto(out *ConfigMapDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapDetails. +func (in *ConfigMapDetails) DeepCopy() *ConfigMapDetails { + if in == nil { + return nil + } + out := new(ConfigMapDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecret) DeepCopyInto(out *DBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecret. +func (in *DBSecret) DeepCopy() *DBSecret { + if in == nil { + return nil + } + out := new(DBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DBSecretWithVault) DeepCopyInto(out *DBSecretWithVault) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DBSecretWithVault. +func (in *DBSecretWithVault) DeepCopy() *DBSecretWithVault { + if in == nil { + return nil + } + out := new(DBSecretWithVault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserver) DeepCopyInto(out *DatabaseObserver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserver. +func (in *DatabaseObserver) DeepCopy() *DatabaseObserver { + if in == nil { + return nil + } + out := new(DatabaseObserver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverConfigMap) DeepCopyInto(out *DatabaseObserverConfigMap) { + *out = *in + out.Configmap = in.Configmap +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverConfigMap. +func (in *DatabaseObserverConfigMap) DeepCopy() *DatabaseObserverConfigMap { + if in == nil { + return nil + } + out := new(DatabaseObserverConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDatabase) DeepCopyInto(out *DatabaseObserverDatabase) { + *out = *in + out.DBUser = in.DBUser + out.DBPassword = in.DBPassword + out.DBWallet = in.DBWallet + out.DBConnectionString = in.DBConnectionString +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDatabase. 
+func (in *DatabaseObserverDatabase) DeepCopy() *DatabaseObserverDatabase { + if in == nil { + return nil + } + out := new(DatabaseObserverDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverDeployment) DeepCopyInto(out *DatabaseObserverDeployment) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ExporterArgs != nil { + in, out := &in.ExporterArgs, &out.ExporterArgs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterCommands != nil { + in, out := &in.ExporterCommands, &out.ExporterCommands + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterEnvs != nil { + in, out := &in.ExporterEnvs, &out.ExporterEnvs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.DeploymentPodTemplate.DeepCopyInto(&out.DeploymentPodTemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverDeployment. +func (in *DatabaseObserverDeployment) DeepCopy() *DatabaseObserverDeployment { + if in == nil { + return nil + } + out := new(DatabaseObserverDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverExporterConfig) DeepCopyInto(out *DatabaseObserverExporterConfig) { + *out = *in + in.Deployment.DeepCopyInto(&out.Deployment) + in.Service.DeepCopyInto(&out.Service) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverExporterConfig. +func (in *DatabaseObserverExporterConfig) DeepCopy() *DatabaseObserverExporterConfig { + if in == nil { + return nil + } + out := new(DatabaseObserverExporterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverList) DeepCopyInto(out *DatabaseObserverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DatabaseObserver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverList. +func (in *DatabaseObserverList) DeepCopy() *DatabaseObserverList { + if in == nil { + return nil + } + out := new(DatabaseObserverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DatabaseObserverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DatabaseObserverService) DeepCopyInto(out *DatabaseObserverService) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1.ServicePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverService. +func (in *DatabaseObserverService) DeepCopy() *DatabaseObserverService { + if in == nil { + return nil + } + out := new(DatabaseObserverService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverSpec) DeepCopyInto(out *DatabaseObserverSpec) { + *out = *in + out.Database = in.Database + in.Exporter.DeepCopyInto(&out.Exporter) + out.ExporterConfig = in.ExporterConfig + in.Prometheus.DeepCopyInto(&out.Prometheus) + out.OCIConfig = in.OCIConfig + out.Log = in.Log + if in.InheritLabels != nil { + in, out := &in.InheritLabels, &out.InheritLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExporterSidecars != nil { + in, out := &in.ExporterSidecars, &out.ExporterSidecars + *out = make([]v1.Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SideCarVolumes != nil { + in, out := &in.SideCarVolumes, &out.SideCarVolumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverSpec. 
+func (in *DatabaseObserverSpec) DeepCopy() *DatabaseObserverSpec { + if in == nil { + return nil + } + out := new(DatabaseObserverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseObserverStatus) DeepCopyInto(out *DatabaseObserverStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseObserverStatus. +func (in *DatabaseObserverStatus) DeepCopy() *DatabaseObserverStatus { + if in == nil { + return nil + } + out := new(DatabaseObserverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentPodTemplate) DeepCopyInto(out *DeploymentPodTemplate) { + *out = *in + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentPodTemplate. +func (in *DeploymentPodTemplate) DeepCopy() *DeploymentPodTemplate { + if in == nil { + return nil + } + out := new(DeploymentPodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogConfig) DeepCopyInto(out *LogConfig) { + *out = *in + out.Volume = in.Volume +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfig. +func (in *LogConfig) DeepCopy() *LogConfig { + if in == nil { + return nil + } + out := new(LogConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolume) DeepCopyInto(out *LogVolume) { + *out = *in + out.PersistentVolumeClaim = in.PersistentVolumeClaim +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolume. +func (in *LogVolume) DeepCopy() *LogVolume { + if in == nil { + return nil + } + out := new(LogVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVolumePVClaim) DeepCopyInto(out *LogVolumePVClaim) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVolumePVClaim. +func (in *LogVolumePVClaim) DeepCopy() *LogVolumePVClaim { + if in == nil { + return nil + } + out := new(LogVolumePVClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIConfigSpec) DeepCopyInto(out *OCIConfigSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIConfigSpec. +func (in *OCIConfigSpec) DeepCopy() *OCIConfigSpec { + if in == nil { + return nil + } + out := new(OCIConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PrometheusConfig) DeepCopyInto(out *PrometheusConfig) { + *out = *in + in.ServiceMonitor.DeepCopyInto(&out.ServiceMonitor) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusConfig. +func (in *PrometheusConfig) DeepCopy() *PrometheusConfig { + if in == nil { + return nil + } + out := new(PrometheusConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusServiceMonitor) DeepCopyInto(out *PrometheusServiceMonitor) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(monitoringv1.NamespaceSelector) + (*in).DeepCopyInto(*out) + } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]monitoringv1.Endpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusServiceMonitor. 
+func (in *PrometheusServiceMonitor) DeepCopy() *PrometheusServiceMonitor { + if in == nil { + return nil + } + out := new(PrometheusServiceMonitor) + in.DeepCopyInto(out) + return out +} diff --git a/commons/adb_family/utils.go b/commons/adb_family/utils.go index 8218502e..591b3130 100644 --- a/commons/adb_family/utils.go +++ b/commons/adb_family/utils.go @@ -39,35 +39,35 @@ package adbfamily import ( - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/oracle/oracle-database-operator/commons/k8s" "sigs.k8s.io/controller-runtime/pkg/client" ) -// VerifyTargetADB searches if the target ADB is in the cluster, and set the owner reference to the ADB if it exists. +// VerifyTargetAdb searches if the target ADB is in the cluster. // The function returns two values in the following order: // ocid: the OCID of the target ADB. An empty string is returned if the ocid is nil. // ownerADB: the resource of the targetADB if it's found in the cluster -func VerifyTargetADB(kubeClient client.Client, target dbv1alpha1.TargetSpec, namespace string) (*dbv1alpha1.AutonomousDatabase, error) { +func VerifyTargetAdb(kubeClient client.Client, target dbv4.TargetSpec, namespace string) (*dbv4.AutonomousDatabase, error) { var err error - var ownerADB *dbv1alpha1.AutonomousDatabase + var ownerAdb *dbv4.AutonomousDatabase // Get the target ADB OCID - if target.K8sADB.Name != nil { + if target.K8sAdb.Name != nil { // Find the target ADB using the name of the k8s ADB - ownerADB = &dbv1alpha1.AutonomousDatabase{} - if err := k8s.FetchResource(kubeClient, namespace, *target.K8sADB.Name, ownerADB); err != nil { + ownerAdb = &dbv4.AutonomousDatabase{} + if err := k8s.FetchResource(kubeClient, namespace, *target.K8sAdb.Name, ownerAdb); err != nil { return nil, err } } else { // Find the target ADB using the ADB OCID - ownerADB, err = k8s.FetchAutonomousDatabaseWithOCID(kubeClient, namespace, 
*target.OCIADB.OCID) + ownerAdb, err = k8s.FetchAutonomousDatabaseWithOCID(kubeClient, namespace, *target.OciAdb.OCID) if err != nil { return nil, err } } - return ownerADB, nil + return ownerAdb, nil } diff --git a/commons/database/constants.go b/commons/database/constants.go index 6f27750d..940a2727 100644 --- a/commons/database/constants.go +++ b/commons/database/constants.go @@ -50,6 +50,8 @@ const DBA_GUID int64 = 54322 const SQLPlusCLI string = "sqlplus -s / as sysdba" +const SQLCLI string = "sql -s / as sysdba" + const GetVersionSQL string = "SELECT VERSION_FULL FROM V\\$INSTANCE;" const CheckModesSQL string = "SELECT 'log_mode:' || log_mode AS log_mode ,'flashback_on:' || flashback_on AS flashback_on ,'force_logging:' || force_logging AS force_logging FROM v\\$database;" @@ -182,10 +184,6 @@ const DataguardBrokerMaxPerformanceCMD string = "CREATE CONFIGURATION dg_config "\nADD DATABASE ${ORACLE_SID} AS CONNECT IDENTIFIER IS ${SVC_HOST}:1521/${ORACLE_SID} MAINTAINED AS PHYSICAL;" + "\nEDIT DATABASE ${PRIMARY_SID} SET PROPERTY LogXptMode='ASYNC';" + "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY LogXptMode='ASYNC';" + - "\nEDIT DATABASE ${PRIMARY_SID} SET PROPERTY STATICCONNECTIDENTIFIER='(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=${PRIMARY_IP})(PORT=1521))" + - "(CONNECT_DATA=(SERVICE_NAME=${PRIMARY_SID}_DGMGRL)(INSTANCE_NAME=${PRIMARY_SID})(SERVER=DEDICATED)))';" + - "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY STATICCONNECTIDENTIFIER='(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=${SVC_HOST})(PORT=1521))" + - "(CONNECT_DATA=(SERVICE_NAME=${ORACLE_SID}_DGMGRL)(INSTANCE_NAME=${ORACLE_SID})(SERVER=DEDICATED)))';" + "\nEDIT CONFIGURATION SET PROTECTION MODE AS MAXPERFORMANCE;" + "\nENABLE CONFIGURATION;" @@ -193,10 +191,6 @@ const DataguardBrokerMaxAvailabilityCMD string = "CREATE CONFIGURATION dg_config "\nADD DATABASE ${ORACLE_SID} AS CONNECT IDENTIFIER IS ${SVC_HOST}:1521/${ORACLE_SID} MAINTAINED AS PHYSICAL;" + "\nEDIT DATABASE ${PRIMARY_SID} SET PROPERTY 
LogXptMode='SYNC';" + "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY LogXptMode='SYNC';" + - "\nEDIT DATABASE ${PRIMARY_SID} SET PROPERTY STATICCONNECTIDENTIFIER='(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=${PRIMARY_IP})(PORT=1521))" + - "(CONNECT_DATA=(SERVICE_NAME=${PRIMARY_SID}_DGMGRL)(INSTANCE_NAME=${PRIMARY_SID})(SERVER=DEDICATED)))';" + - "\nEDIT DATABASE ${ORACLE_SID} SET PROPERTY STATICCONNECTIDENTIFIER='(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=${SVC_HOST})(PORT=1521))" + - "(CONNECT_DATA=(SERVICE_NAME=${ORACLE_SID}_DGMGRL)(INSTANCE_NAME=${ORACLE_SID})(SERVER=DEDICATED)))';" + "\nEDIT CONFIGURATION SET PROTECTION MODE AS MAXAVAILABILITY;" + "\nENABLE CONFIGURATION;" @@ -221,6 +215,9 @@ const DataguardBrokerGetDatabaseCMD string = "SELECT DATABASE || ':' || DATAGUAR const EnableFSFOCMD string = "ENABLE FAST_START FAILOVER;" +const DisableFSFOCMD string = "STOP OBSERVER %s" + + "\nDISABLE FAST_START FAILOVER;" + const RemoveDataguardConfiguration string = "DISABLE FAST_START FAILOVER;" + "\nEDIT CONFIGURATION SET PROTECTION MODE AS MAXPERFORMANCE;" + "\nREMOVE CONFIGURATION;" @@ -345,6 +342,8 @@ const InitORDSCMD string = "if [ -f $ORDS_HOME/config/ords/defaults.xml ]; then "\nrm -f sqladmin.passwd" + "\numask 022" +const DbConnectString string = "CONN_STRING=sys/%[1]s@%[2]s:1521/%[3]s" + const GetSessionInfoSQL string = "select s.sid || ',' || s.serial# as Info FROM v\\$session s, v\\$process p " + "WHERE (s.username = 'ORDS_PUBLIC_USER' or " + "s.username = 'APEX_PUBLIC_USER' or " + @@ -369,7 +368,9 @@ const UninstallORDSCMD string = "\numask 177" + "\nrm -rf /opt/oracle/ords/config/ords/standalone" + "\nrm -rf /opt/oracle/ords/config/ords/apex" -const GetORDSStatus string = "curl -sSkv -k -X GET https://localhost:8443/ords/_/db-api/stable/metadata-catalog/" +const GetORDSStatus string = "curl -sSkvf -k -X GET http://localhost:8181/ords/_/db-api/stable/metadata-catalog/" + +const ORDSReadinessProbe string = "curl -sSkvf -k -X GET 
http://localhost:8181/ords/_/landing" const ValidateAdminPassword string = "conn sys/\\\"%s\\\"@${ORACLE_SID} as sysdba\nshow user" @@ -403,6 +404,8 @@ const StatusReady string = "Healthy" const StatusError string = "Error" +const StatusUnknown string = "Unknown" + const ValueUnavailable string = "Unavailable" const NoExternalIp string = "Node ExternalIP unavailable" @@ -442,18 +445,18 @@ const ChownApex string = " chown oracle:oinstall /opt/oracle/oradata/${ORACLE_SI const InstallApex string = "if [ -f /opt/oracle/oradata/${ORACLE_SID^^}/apex/apexins.sql ]; then ( while true; do sleep 60; echo \"Installing Apex...\" ; done ) & " + " cd /opt/oracle/oradata/${ORACLE_SID^^}/apex && echo -e \"@apexins.sql SYSAUX SYSAUX TEMP /i/\" | %[1]s && kill -9 $!; else echo \"Apex Folder doesn't exist\" ; fi ;" -const InstallApexInContainer string = "cd ${ORDS_HOME}/config/apex/ && echo -e \"@apxsilentins.sql SYSAUX SYSAUX TEMP /i/ %[1]s %[1]s %[1]s %[1]s;\n" + +const InstallApexInContainer string = "cd ${APEX_HOME}/${APEX_VER} && echo -e \"@apxsilentins.sql SYSAUX SYSAUX TEMP /i/ %[1]s %[1]s %[1]s %[1]s;\n" + "@apex_rest_config_core.sql;\n" + "exec APEX_UTIL.set_workspace(p_workspace => 'INTERNAL');\n" + "exec APEX_UTIL.EDIT_USER(p_user_id => APEX_UTIL.GET_USER_ID('ADMIN'), p_user_name => 'ADMIN', p_change_password_on_first_use => 'Y');\n" + - "\" | sqlplus -s sys/%[2]s@${ORACLE_HOST}:${ORACLE_PORT}/%[3]s as sysdba;" + "\" | sql -s sys/%[2]s@${ORACLE_HOST}:${ORACLE_PORT}/%[3]s as sysdba;" const IsApexInstalled string = "echo -e \"select 'APEXVERSION:'||version as version FROM DBA_REGISTRY WHERE COMP_ID='APEX';\"" + - " | sqlplus -s sys/%[1]s@${ORACLE_HOST}:${ORACLE_PORT}/%[2]s as sysdba;" + " | sql -s sys/%[1]s@${ORACLE_HOST}:${ORACLE_PORT}/%[2]s as sysdba;" -const UninstallApex string = "cd ${ORDS_HOME}/config/apex/ && echo -e \"@apxremov.sql\n\" | sqlplus -s sys/%[1]s@${ORACLE_HOST}:${ORACLE_PORT}/%[2]s as sysdba;" +const UninstallApex string = "cd ${APEX_HOME}/${APEX_VER} && 
echo -e \"@apxremov.sql\n\" | sql -s sys/%[1]s@${ORACLE_HOST}:${ORACLE_PORT}/%[2]s as sysdba;" -const ConfigureApexRest string = "if [ -f ${ORDS_HOME}/config/apex/apex_rest_config.sql ]; then cd ${ORDS_HOME}/config/apex && " + +const ConfigureApexRest string = "if [ -f ${APEX_HOME}/${APEX_VER}/apex_rest_config.sql ]; then cd ${ORDS_HOME}/config/apex && " + "echo -e \"%[1]s\n%[1]s\" | %[2]s ; else echo \"Apex Folder doesn't exist\" ; fi ;" const AlterApexUsers string = "\nALTER SESSION SET CONTAINER=%[2]s;" + @@ -508,6 +511,9 @@ const SetApexUsers string = "\numask 177" + "\nrm -f apexPublicUser" + "\numask 022" +// Command to enable/disable MongoDB API support in ords pods +const ConfigMongoDb string = "ords config set mongo.enabled %[1]s" + // Get Sid, Pdbname, Edition for prebuilt db const GetSidPdbEditionCMD string = "echo $ORACLE_SID,$ORACLE_PDB,$ORACLE_EDITION;" diff --git a/commons/database/podbuilder.go b/commons/database/podbuilder.go new file mode 100644 index 00000000..c704c4fc --- /dev/null +++ b/commons/database/podbuilder.go @@ -0,0 +1,108 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package commons + +import ( + "k8s.io/apimachinery/pkg/types" + + corev1 "k8s.io/api/core/v1" +) + +type PodBuilder interface { + SetNamespacedName(types.NamespacedName) *PodBuilder + SetLabels(map[string]string) *PodBuilder + SetTerminationGracePeriodSeconds(int64) *PodBuilder + SetNodeSelector(map[string]string) *PodBuilder + SetSecurityContext(corev1.PodSecurityContext) *PodBuilder + SetImagePullSecrets(string) *PodBuilder + AppendContainers(corev1.Container) *PodBuilder + Build() corev1.Pod +} + +type RealPodBuilder struct { + pod corev1.Pod +} + +func (rpb *RealPodBuilder) SetNamespacedName(namespacedName types.NamespacedName) *RealPodBuilder { + rpb.pod.ObjectMeta.Name = namespacedName.Name + rpb.pod.ObjectMeta.Namespace = namespacedName.Namespace + return rpb +} + +func (rpb *RealPodBuilder) SetLabels(labels map[string]string) *RealPodBuilder { + rpb.pod.ObjectMeta.Labels = labels + return rpb +} + +func (rpb *RealPodBuilder) SetTerminationGracePeriodSeconds(terminationGracePeriod int64) *RealPodBuilder { + rpb.pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriod + return rpb +} + +func (rpb *RealPodBuilder) SetNodeSelector(nsRule map[string]string) *RealPodBuilder { + rpb.pod.Spec.NodeSelector = nsRule + return rpb +} + +func (rpb *RealPodBuilder) SetSecurityContext(podSecurityContext corev1.PodSecurityContext) *RealPodBuilder { + rpb.pod.Spec.SecurityContext = &podSecurityContext + return rpb +} + +func (rpb *RealPodBuilder) SetImagePullSecrets(imagePullSecret string) *RealPodBuilder { + rpb.pod.Spec.ImagePullSecrets = []corev1.LocalObjectReference{ + { + Name: imagePullSecret, + }, + } + return rpb +} + +func (rpb *RealPodBuilder) AppendContainers(container corev1.Container) *RealPodBuilder { + rpb.pod.Spec.Containers = append(rpb.pod.Spec.Containers, container) + return rpb +} + +func (rpb *RealPodBuilder) Build() corev1.Pod { + return rpb.pod +} + +func NewRealPodBuilder() *RealPodBuilder { + return &RealPodBuilder{} +} diff --git 
a/commons/database/svcbuilder.go b/commons/database/svcbuilder.go new file mode 100644 index 00000000..8029c8ee --- /dev/null +++ b/commons/database/svcbuilder.go @@ -0,0 +1,99 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package commons + +import ( + corev1 "k8s.io/api/core/v1" +) + +type ServiceBuilder interface { + SetName(string) *ServiceBuilder + SetNamespace(string) *ServiceBuilder + SetLabels(map[string]string) *ServiceBuilder + SetAnnotation(map[string]string) *ServiceBuilder + SetPorts([]corev1.ServicePort) *ServiceBuilder + SetSelector(map[string]string) *ServiceBuilder + SetPublishNotReadyAddresses(bool) *ServiceBuilder + SetServiceType(corev1.ServiceType) *ServiceBuilder + Build() *corev1.Service +} + +type RealServiceBuilder struct { + service corev1.Service +} + +func (rsb *RealServiceBuilder) SetName(name string) *RealServiceBuilder { + rsb.service.ObjectMeta.Name = name + return rsb +} +func (rsb *RealServiceBuilder) SetNamespace(namespace string) *RealServiceBuilder { + rsb.service.ObjectMeta.Namespace = namespace + return rsb +} +func (rsb *RealServiceBuilder) SetLabels(labels map[string]string) *RealServiceBuilder { + rsb.service.ObjectMeta.Labels = labels + return rsb +} +func (rsb *RealServiceBuilder) SetAnnotation(annotations map[string]string) *RealServiceBuilder { + rsb.service.ObjectMeta.Annotations = annotations + return rsb +} +func (rsb *RealServiceBuilder) SetPorts(ports []corev1.ServicePort) *RealServiceBuilder { + rsb.service.Spec.Ports = ports + return rsb +} +func (rsb *RealServiceBuilder) SetSelector(selector map[string]string) *RealServiceBuilder { + rsb.service.Spec.Selector = selector + return rsb +} +func (rsb *RealServiceBuilder) SetPublishNotReadyAddresses(flag bool) *RealServiceBuilder { + rsb.service.Spec.PublishNotReadyAddresses = flag + return rsb +} +func (rsb *RealServiceBuilder) SetType(serviceType corev1.ServiceType) *RealServiceBuilder { + 
rsb.service.Spec.Type = serviceType + return rsb +} +func (rsb *RealServiceBuilder) Build() corev1.Service { + return rsb.service +} + +func NewRealServiceBuilder() *RealServiceBuilder { + return &RealServiceBuilder{} +} diff --git a/commons/database/utils.go b/commons/database/utils.go index 1723bc90..e0536642 100644 --- a/commons/database/utils.go +++ b/commons/database/utils.go @@ -502,32 +502,6 @@ func GetPrimaryDatabase(databases []string) string { return primary } -// Returns the databases in DG config . -func GetDatabasesInDgConfig(readyPod corev1.Pod, r client.Reader, - config *rest.Config, ctx context.Context, req ctrl.Request) ([]string, string, error) { - log := ctrllog.FromContext(ctx).WithValues("GetDatabasesInDgConfig", req.NamespacedName) - - // ## FIND DATABASES PRESENT IN DG CONFIGURATION - out, err := ExecCommand(r, config, readyPod.Name, readyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | sqlplus -s / as sysdba ", DataguardBrokerGetDatabaseCMD)) - if err != nil { - return []string{}, "", err - } - log.Info("GetDatabasesInDgConfig Output") - log.Info(out) - - if !strings.Contains(out, "no rows selected") && !strings.Contains(out, "ORA-") { - out1 := strings.Replace(out, " ", "_", -1) - // filtering output and storing databses in dg configuration in "databases" slice - databases := strings.Fields(out1) - - // first 2 values in the slice will be column name(DATABASES) and a seperator(--------------) . 
so take the slice from position [2:] - databases = databases[2:] - return databases, out, nil - } - return []string{}, out, errors.New("databases in DG config is nil") -} - // Returns Database version func GetDatabaseVersion(readyPod corev1.Pod, r client.Reader, config *rest.Config, ctx context.Context, req ctrl.Request) (string, error) { diff --git a/commons/dbcssystem/dbcs_reconciler.go b/commons/dbcssystem/dbcs_reconciler.go index 60905c76..6c498320 100644 --- a/commons/dbcssystem/dbcs_reconciler.go +++ b/commons/dbcssystem/dbcs_reconciler.go @@ -43,6 +43,8 @@ import ( "encoding/json" "errors" "fmt" + "reflect" + "strings" "time" "github.com/go-logr/logr" @@ -51,68 +53,102 @@ import ( "github.com/oracle/oci-go-sdk/v65/database" "github.com/oracle/oci-go-sdk/v65/workrequests" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/oracle/oracle-database-operator/commons/annotations" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" ) -func CreateAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) (string, error) { +const ( + checkInterval = 30 * time.Second + timeout = 15 * time.Minute +) + +func CreateAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient, kmsDetails *databasev4.KMSDetailsStatus) (string, error) { - //var provisionedDbcsSystemId string ctx := context.TODO() - // Get DB System Details - dbcsDetails := database.LaunchDbSystemDetails{} + // Check if DBCS system already exists 
using the displayName + listDbcsRequest := database.ListDbSystemsRequest{ + CompartmentId: common.String(dbcs.Spec.DbSystem.CompartmentId), + DisplayName: common.String(dbcs.Spec.DbSystem.DisplayName), + } + + listDbcsResponse, err := dbClient.ListDbSystems(ctx, listDbcsRequest) + if err != nil { + return "", err + } + + // Check if any DBCS system matches the display name + if len(listDbcsResponse.Items) > 0 { + for _, dbcsItem := range listDbcsResponse.Items { + if dbcsItem.DisplayName != nil && *dbcsItem.DisplayName == dbcs.Spec.DbSystem.DisplayName { + logger.Info("DBCS system already exists", "DBCS ID", *dbcsItem.Id) + return *dbcsItem.Id, nil + } + } + } + // Get the admin password from OCI key sshPublicKeys, err := getPublicSSHKey(kubeClient, dbcs) if err != nil { return "", err } - // Get Db SystemOption + + // Get DB SystemOptions dbSystemReq := GetDBSystemopts(dbcs) licenceModel := getLicenceModel(dbcs) - if dbcs.Spec.DbSystem.ClusterName != "" { - dbcsDetails.ClusterName = &dbcs.Spec.DbSystem.ClusterName - } - - if dbcs.Spec.DbSystem.TimeZone != "" { - dbcsDetails.TimeZone = &dbcs.Spec.DbSystem.TimeZone - } // Get DB Home Details dbHomeReq, err := GetDbHomeDetails(kubeClient, dbClient, dbcs) if err != nil { return "", err } - //tenancyOcid, _ := provider.TenancyOCID() - dbcsDetails.AvailabilityDomain = common.String(dbcs.Spec.DbSystem.AvailabilityDomain) - dbcsDetails.CompartmentId = common.String(dbcs.Spec.DbSystem.CompartmentId) - dbcsDetails.SubnetId = common.String(dbcs.Spec.DbSystem.SubnetId) - dbcsDetails.Shape = common.String(dbcs.Spec.DbSystem.Shape) - dbcsDetails.Domain = common.String(dbcs.Spec.DbSystem.Domain) - if dbcs.Spec.DbSystem.DisplayName != "" { - dbcsDetails.DisplayName = common.String(dbcs.Spec.DbSystem.DisplayName) - } - dbcsDetails.SshPublicKeys = []string{sshPublicKeys} - dbcsDetails.Hostname = common.String(dbcs.Spec.DbSystem.HostName) - dbcsDetails.CpuCoreCount = common.Int(dbcs.Spec.DbSystem.CpuCoreCount) - 
//dbcsDetails.SourceDbSystemId = common.String(r.tenancyOcid) - dbcsDetails.NodeCount = common.Int(GetNodeCount(dbcs)) - dbcsDetails.InitialDataStorageSizeInGB = common.Int(GetInitialStorage(dbcs)) - dbcsDetails.DbSystemOptions = &dbSystemReq - dbcsDetails.DbHome = &dbHomeReq - dbcsDetails.DatabaseEdition = GetDBEdition(dbcs) - dbcsDetails.DiskRedundancy = GetDBbDiskRedundancy(dbcs) - dbcsDetails.LicenseModel = database.LaunchDbSystemDetailsLicenseModelEnum(licenceModel) + + // Determine CpuCoreCount + cpuCoreCount := 2 // default value + if dbcs.Spec.DbSystem.CpuCoreCount > 0 { + cpuCoreCount = dbcs.Spec.DbSystem.CpuCoreCount + } + + // Set up DB system details + dbcsDetails := database.LaunchDbSystemDetails{ + AvailabilityDomain: common.String(dbcs.Spec.DbSystem.AvailabilityDomain), + CompartmentId: common.String(dbcs.Spec.DbSystem.CompartmentId), + SubnetId: common.String(dbcs.Spec.DbSystem.SubnetId), + Shape: common.String(dbcs.Spec.DbSystem.Shape), + Domain: common.String(dbcs.Spec.DbSystem.Domain), + DisplayName: common.String(dbcs.Spec.DbSystem.DisplayName), + SshPublicKeys: []string{sshPublicKeys}, + Hostname: common.String(dbcs.Spec.DbSystem.HostName), + CpuCoreCount: common.Int(cpuCoreCount), + NodeCount: common.Int(GetNodeCount(dbcs)), + InitialDataStorageSizeInGB: common.Int(GetInitialStorage(dbcs)), + DbSystemOptions: &dbSystemReq, + DbHome: &dbHomeReq, + DatabaseEdition: GetDBEdition(dbcs), + DiskRedundancy: GetDBbDiskRedundancy(dbcs), + LicenseModel: database.LaunchDbSystemDetailsLicenseModelEnum(licenceModel), + } + if len(dbcs.Spec.DbSystem.Tags) != 0 { dbcsDetails.FreeformTags = dbcs.Spec.DbSystem.Tags } + // Add KMS details if available + if kmsDetails != nil && kmsDetails.VaultId != "" { + dbcsDetails.KmsKeyId = common.String(kmsDetails.KeyId) + dbcsDetails.DbHome.Database.KmsKeyId = common.String(kmsDetails.KeyId) + dbcsDetails.DbHome.Database.VaultId = common.String(kmsDetails.VaultId) + } + + // Log dbcsDetails for debugging + 
logger.Info("Launching DB System with details", "dbcsDetails", dbcsDetails) + req := database.LaunchDbSystemRequest{LaunchDbSystemDetails: dbcsDetails} // Send the request using the service client @@ -122,12 +158,14 @@ func CreateAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient d } dbcs.Spec.Id = resp.DbSystem.Id + // Change the phase to "Provisioning" - if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev1alpha1.Provision, nwClient, wrClient); statusErr != nil { + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Provision, nwClient, wrClient); statusErr != nil { return "", statusErr } + // Check the State - _, err = CheckResourceState(logger, dbClient, *resp.DbSystem.Id, string(databasev1alpha1.Provision), string(databasev1alpha1.Available)) + _, err = CheckResourceState(logger, dbClient, *resp.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) if err != nil { return "", err } @@ -135,16 +173,418 @@ func CreateAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient d return *resp.DbSystem.Id, nil } +func parseLicenseModel(licenseModelStr string) (database.DbSystemLicenseModelEnum, error) { + switch licenseModelStr { + case "LICENSE_INCLUDED": + return database.DbSystemLicenseModelLicenseIncluded, nil + case "BRING_YOUR_OWN_LICENSE": + return database.DbSystemLicenseModelBringYourOwnLicense, nil + default: + return "", fmt.Errorf("invalid license model: %s", licenseModelStr) + } +} +func convertLicenseModel(licenseModel database.DbSystemLicenseModelEnum) (database.LaunchDbSystemFromDbSystemDetailsLicenseModelEnum, error) { + switch licenseModel { + case database.DbSystemLicenseModelLicenseIncluded: + return database.LaunchDbSystemFromDbSystemDetailsLicenseModelLicenseIncluded, nil + case database.DbSystemLicenseModelBringYourOwnLicense: + return database.LaunchDbSystemFromDbSystemDetailsLicenseModelBringYourOwnLicense, nil + default: + return "", fmt.Errorf("unsupported 
license model: %s", licenseModel) + } +} + +func CloneAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) (string, error) { + ctx := context.TODO() + var err error + dbAdminPassword := "" + // tdePassword := "" + logger.Info("Starting the clone process for DBCS", "dbcs", dbcs) + // Get the admin password from Kubernetes secret + if dbcs.Spec.DbClone.DbAdminPasswordSecret != "" { + dbAdminPassword, err = GetCloningAdminPassword(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get DB Admin password") + } + // logger.Info(dbAdminPassword) + } + // // Log retrieved passwords + logger.Info("Retrieved passwords from Kubernetes secrets") + + // // // Retrieve the TDE wallet password from Kubernetes secrets + // // tdePassword, err := GetTdePassword(kubeClient, dbcs.Namespace, dbcs.Spec.TdeWalletPasswordSecretName) + // // if err != nil { + // // logger.Error(err, "Failed to get TDE wallet password from Kubernetes secret", "namespace", dbcs.Namespace, "secretName", dbcs.Spec.TdeWalletPasswordSecretName) + // // return "", err + // // } + sshPublicKeys, err := getCloningPublicSSHKey(kubeClient, dbcs) + if err != nil { + logger.Error(err, "failed to get SSH public key") + } + + // Fetch the existing DB system details + existingDbSystem, err := dbClient.GetDbSystem(ctx, database.GetDbSystemRequest{ + DbSystemId: dbcs.Spec.Id, + }) + if err != nil { + return "", err + } + logger.Info("Retrieved existing Db System Details from OCI using Spec.Id") + + // // Create the clone request payload + // // Create the DbHome details + // Prepare CreateDatabaseFromDbSystemDetails + databaseDetails := &database.CreateDatabaseFromDbSystemDetails{ + AdminPassword: &dbAdminPassword, + DbName: &dbcs.Spec.DbClone.DbName, + DbDomain: existingDbSystem.DbSystem.Domain, + DbUniqueName: &dbcs.Spec.DbClone.DbUniqueName, + FreeformTags: 
existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + } + licenseModelEnum, err := parseLicenseModel(dbcs.Spec.DbClone.LicenseModel) + if err != nil { + return "", err + } + launchLicenseModel, err := convertLicenseModel(licenseModelEnum) + if err != nil { + return "", err + } + + cloneRequest := database.LaunchDbSystemFromDbSystemDetails{ + CompartmentId: existingDbSystem.DbSystem.CompartmentId, + AvailabilityDomain: existingDbSystem.DbSystem.AvailabilityDomain, + SubnetId: &dbcs.Spec.DbClone.SubnetId, + Shape: existingDbSystem.DbSystem.Shape, + SshPublicKeys: []string{sshPublicKeys}, + Hostname: &dbcs.Spec.DbClone.HostName, + CpuCoreCount: existingDbSystem.DbSystem.CpuCoreCount, + SourceDbSystemId: existingDbSystem.DbSystem.Id, + DbHome: &database.CreateDbHomeFromDbSystemDetails{ + Database: databaseDetails, + DisplayName: existingDbSystem.DbSystem.DisplayName, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + }, + FaultDomains: existingDbSystem.DbSystem.FaultDomains, + DisplayName: &dbcs.Spec.DbClone.DisplayName, + BackupSubnetId: existingDbSystem.DbSystem.BackupSubnetId, + NsgIds: existingDbSystem.DbSystem.NsgIds, + BackupNetworkNsgIds: existingDbSystem.DbSystem.BackupNetworkNsgIds, + TimeZone: existingDbSystem.DbSystem.TimeZone, + DbSystemOptions: existingDbSystem.DbSystem.DbSystemOptions, + SparseDiskgroup: existingDbSystem.DbSystem.SparseDiskgroup, + Domain: &dbcs.Spec.DbClone.Domain, + ClusterName: existingDbSystem.DbSystem.ClusterName, + DataStoragePercentage: existingDbSystem.DbSystem.DataStoragePercentage, + // KmsKeyId: existingDbSystem.DbSystem.KmsKeyId, + // KmsKeyVersionId: existingDbSystem.DbSystem.KmsKeyVersionId, + NodeCount: existingDbSystem.DbSystem.NodeCount, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + DataCollectionOptions: existingDbSystem.DbSystem.DataCollectionOptions, + 
LicenseModel: launchLicenseModel, + } + + // Execute the clone request + response, err := dbClient.LaunchDbSystem(ctx, database.LaunchDbSystemRequest{ + LaunchDbSystemDetails: cloneRequest, + }) + if err != nil { + return "", err + } + + dbcs.Status.DbCloneStatus.Id = response.DbSystem.Id + + // Change the phase to "Provisioning" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Provision, nwClient, wrClient); statusErr != nil { + return "", statusErr + } + + // Check the state + _, err = CheckResourceState(logger, dbClient, *response.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) + if err != nil { + return "", err + } + + return *response.DbSystem.Id, nil + // return "", nil +} + +// CloneFromBackupAndGetDbcsId clones a DB system from a backup and returns the new DB system's OCID. +func CloneFromBackupAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) (string, error) { + ctx := context.TODO() + + var err error + var dbAdminPassword string + var tdePassword string + logger.Info("Starting the clone process for DBCS from backup", "dbcs", dbcs) + backupResp, err := dbClient.GetBackup(ctx, database.GetBackupRequest{ + BackupId: dbcs.Spec.DbBackupId, + }) + + if err != nil { + fmt.Println("Error getting backup details:", err) + return "", err + } + databaseId := backupResp.Backup.DatabaseId + // Fetch the existing Database details + existingDatabase, err := dbClient.GetDatabase(ctx, database.GetDatabaseRequest{ + DatabaseId: databaseId, + }) + if err != nil { + logger.Error(err, "Failed to retrieve existing Database details") + return "", err + } + // Check if DbSystemId is available + dbSystemId := existingDatabase.DbSystemId + if dbSystemId == nil { + // handle the case where DbSystemId is not available + logger.Error(err, "DBSystemId not found") + return "", err + } + + 
// Fetch the existing DB system details + existingDbSystem, err := dbClient.GetDbSystem(ctx, database.GetDbSystemRequest{ + DbSystemId: dbSystemId, + }) + if err != nil { + return "", err + } + // Get the admin password from Kubernetes secret + if dbcs.Spec.DbClone.DbAdminPasswordSecret != "" { + dbAdminPassword, err = GetCloningAdminPassword(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get DB Admin password") + } + // logger.Info(dbAdminPassword) + } + // // // Retrieve the TDE wallet password from Kubernetes secrets to open backup DB using TDE Wallet + if dbcs.Spec.DbClone.TdeWalletPasswordSecret != "" { + tdePassword, err = GetCloningTdePassword(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get TDE wallet password from Kubernetes secret") + return "", err + } + } + + sshPublicKeys, err := getCloningPublicSSHKey(kubeClient, dbcs) + if err != nil { + logger.Error(err, "failed to get SSH public key") + return "", err + } + + // Create the clone request payload + cloneRequest := database.LaunchDbSystemFromBackupDetails{ + CompartmentId: existingDbSystem.DbSystem.CompartmentId, + AvailabilityDomain: existingDbSystem.DbSystem.AvailabilityDomain, + SubnetId: &dbcs.Spec.DbClone.SubnetId, + Shape: existingDbSystem.DbSystem.Shape, + SshPublicKeys: []string{sshPublicKeys}, + Hostname: &dbcs.Spec.DbClone.HostName, + CpuCoreCount: existingDbSystem.DbSystem.CpuCoreCount, + DbHome: &database.CreateDbHomeFromBackupDetails{ + Database: &database.CreateDatabaseFromBackupDetails{ // Corrected type here + BackupId: dbcs.Spec.DbBackupId, + AdminPassword: &dbAdminPassword, + BackupTDEPassword: &tdePassword, + DbName: &dbcs.Spec.DbClone.DbName, + // DbDomain: existingDbSystem.DbSystem.Domain, + DbUniqueName: &dbcs.Spec.DbClone.DbUniqueName, + // FreeformTags: existingDbSystem.DbSystem.FreeformTags, + // DefinedTags: existingDbSystem.DbSystem.DefinedTags, + SidPrefix: &dbcs.Spec.DbClone.SidPrefix, + }, + DisplayName: 
existingDbSystem.DbSystem.DisplayName, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + }, + FaultDomains: existingDbSystem.DbSystem.FaultDomains, + DisplayName: &dbcs.Spec.DbClone.DisplayName, + BackupSubnetId: existingDbSystem.DbSystem.BackupSubnetId, + NsgIds: existingDbSystem.DbSystem.NsgIds, + BackupNetworkNsgIds: existingDbSystem.DbSystem.BackupNetworkNsgIds, + TimeZone: existingDbSystem.DbSystem.TimeZone, + DbSystemOptions: existingDbSystem.DbSystem.DbSystemOptions, + SparseDiskgroup: existingDbSystem.DbSystem.SparseDiskgroup, + Domain: &dbcs.Spec.DbClone.Domain, + ClusterName: existingDbSystem.DbSystem.ClusterName, + DataStoragePercentage: existingDbSystem.DbSystem.DataStoragePercentage, + InitialDataStorageSizeInGB: &dbcs.Spec.DbClone.InitialDataStorageSizeInGB, + KmsKeyId: &dbcs.Spec.DbClone.KmsKeyId, + KmsKeyVersionId: &dbcs.Spec.DbClone.KmsKeyVersionId, + NodeCount: existingDbSystem.DbSystem.NodeCount, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + DataCollectionOptions: existingDbSystem.DbSystem.DataCollectionOptions, + DatabaseEdition: database.LaunchDbSystemFromBackupDetailsDatabaseEditionEnum(existingDbSystem.DbSystem.DatabaseEdition), + LicenseModel: database.LaunchDbSystemFromBackupDetailsLicenseModelEnum(existingDbSystem.DbSystem.LicenseModel), + StorageVolumePerformanceMode: database.LaunchDbSystemBaseStorageVolumePerformanceModeEnum(existingDbSystem.DbSystem.StorageVolumePerformanceMode), + } + + // Execute the clone request + response, err := dbClient.LaunchDbSystem(ctx, database.LaunchDbSystemRequest{ + LaunchDbSystemDetails: cloneRequest, + }) + if err != nil { + return "", err + } + + dbcs.Status.DbCloneStatus.Id = response.DbSystem.Id + + // Change the phase to "Provisioning" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Provision, nwClient, wrClient); statusErr != nil { + return "", 
statusErr + } + + // Check the state + _, err = CheckResourceState(logger, dbClient, *response.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) + if err != nil { + return "", err + } + + return *response.DbSystem.Id, nil +} + // Sync the DbcsSystem Database details +func CloneFromDatabaseAndGetDbcsId(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) (string, error) { + ctx := context.TODO() + var err error + dbAdminPassword := "" + tdePassword := "" + logger.Info("Starting the clone process for Database", "dbcs", dbcs) + + // Get the admin password from Kubernetes secret + if dbcs.Spec.DbClone.DbAdminPasswordSecret != "" { + dbAdminPassword, err = GetCloningAdminPassword(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get DB Admin password") + return "", err + } + } + // // // Retrieve the TDE wallet password from Kubernetes secrets to open backup DB using TDE Wallet + if dbcs.Spec.DbClone.TdeWalletPasswordSecret != "" { + tdePassword, err = GetCloningTdePassword(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get TDE wallet password from Kubernetes secret") + return "", err + } + } + + logger.Info("Retrieved passwords from Kubernetes secrets") + + // Fetch the existing Database details + existingDatabase, err := dbClient.GetDatabase(ctx, database.GetDatabaseRequest{ + DatabaseId: dbcs.Spec.DatabaseId, + }) + if err != nil { + logger.Error(err, "Failed to retrieve existing Database details") + return "", err + } + // Check if DbSystemId is available + dbSystemId := existingDatabase.DbSystemId + if dbSystemId == nil { + // handle the case where DbSystemId is not available + logger.Error(err, "DBSystemId not found") + return "", err + } + + // Fetch the existing DB system details + existingDbSystem, err := dbClient.GetDbSystem(ctx, database.GetDbSystemRequest{ + 
DbSystemId: dbSystemId, + }) + if err != nil { + return "", err + } + logger.Info("Retrieved existing Database details from OCI", "DatabaseId", dbcs.Spec.DatabaseId) + + // Get SSH public key + sshPublicKeys, err := getCloningPublicSSHKey(kubeClient, dbcs) + if err != nil { + logger.Error(err, "Failed to get SSH public key") + return "", err + } + + // Create the clone request payload + cloneRequest := database.LaunchDbSystemFromDatabaseDetails{ + CompartmentId: existingDatabase.CompartmentId, + AvailabilityDomain: existingDbSystem.DbSystem.AvailabilityDomain, + SubnetId: existingDbSystem.DbSystem.SubnetId, + Shape: existingDbSystem.DbSystem.Shape, + SshPublicKeys: []string{sshPublicKeys}, + Hostname: &dbcs.Spec.DbClone.HostName, + CpuCoreCount: existingDbSystem.DbSystem.CpuCoreCount, + DatabaseEdition: database.LaunchDbSystemFromDatabaseDetailsDatabaseEditionEnum(existingDbSystem.DbSystem.DatabaseEdition), + DbHome: &database.CreateDbHomeFromDatabaseDetails{ + Database: &database.CreateDatabaseFromAnotherDatabaseDetails{ + // Mandatory fields + DatabaseId: dbcs.Spec.DatabaseId, // Source database ID + // Optionally fill in other fields if needed + DbName: &dbcs.Spec.DbClone.DbName, + AdminPassword: &dbAdminPassword, // Admin password for the new database + // The password to open the TDE wallet. 
+ BackupTDEPassword: &tdePassword, + + DbUniqueName: &dbcs.Spec.DbClone.DbUniqueName, + }, + + // Provide a display name for the new Database Home + DisplayName: existingDbSystem.DbSystem.DisplayName, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + }, + + FaultDomains: existingDbSystem.DbSystem.FaultDomains, + DisplayName: &dbcs.Spec.DbClone.DisplayName, + BackupSubnetId: existingDbSystem.DbSystem.BackupSubnetId, + NsgIds: existingDbSystem.DbSystem.NsgIds, + BackupNetworkNsgIds: existingDbSystem.DbSystem.BackupNetworkNsgIds, + TimeZone: existingDbSystem.DbSystem.TimeZone, + KmsKeyId: &dbcs.Spec.DbClone.KmsKeyId, + KmsKeyVersionId: &dbcs.Spec.DbClone.KmsKeyVersionId, + NodeCount: existingDbSystem.DbSystem.NodeCount, + FreeformTags: existingDbSystem.DbSystem.FreeformTags, + DefinedTags: existingDbSystem.DbSystem.DefinedTags, + // PrivateIp: &dbcs.Spec.DbClone.PrivateIp, + InitialDataStorageSizeInGB: &dbcs.Spec.DbClone.InitialDataStorageSizeInGB, + LicenseModel: database.LaunchDbSystemFromDatabaseDetailsLicenseModelEnum(existingDbSystem.DbSystem.LicenseModel), + StorageVolumePerformanceMode: database.LaunchDbSystemBaseStorageVolumePerformanceModeEnum(existingDbSystem.DbSystem.StorageVolumePerformanceMode), + } + + // logger.Info("Launching database clone", "cloneRequest", cloneRequest) + + // Execute the clone request + response, err := dbClient.LaunchDbSystem(ctx, database.LaunchDbSystemRequest{ + LaunchDbSystemDetails: cloneRequest, + }) + if err != nil { + return "", err + } + + dbcs.Status.DbCloneStatus.Id = response.DbSystem.Id + + // Change the phase to "Provisioning" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Provision, nwClient, wrClient); statusErr != nil { + return "", statusErr + } + + // Check the state + _, err = CheckResourceState(logger, dbClient, *response.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) + if err != nil { + return "", 
err + } + + return *response.DbSystem.Id, nil +} // Get admin password from Secret then OCI valut secret -func GetAdminPassword(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem) (string, error) { - if dbcs.Spec.DbSystem.DbAdminPaswordSecret != "" { +func GetCloningAdminPassword(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { + if dbcs.Spec.DbClone.DbAdminPasswordSecret != "" { // Get the Admin Secret adminSecret := &corev1.Secret{} err := kubeClient.Get(context.TODO(), types.NamespacedName{ Namespace: dbcs.GetNamespace(), - Name: dbcs.Spec.DbSystem.DbAdminPaswordSecret, + Name: dbcs.Spec.DbClone.DbAdminPasswordSecret, }, adminSecret) if err != nil { @@ -154,7 +594,7 @@ func GetAdminPassword(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSyste // Get the admin password key := "admin-password" if val, ok := adminSecret.Data[key]; ok { - return string(val), nil + return strings.TrimSpace(string(val)), nil } else { msg := "secret item not found: admin-password" return "", errors.New(msg) @@ -164,7 +604,33 @@ func GetAdminPassword(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSyste } // Get admin password from Secret then OCI valut secret -func GetTdePassword(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem) (string, error) { +func GetAdminPassword(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { + if dbcs.Spec.DbSystem.DbAdminPasswordSecret != "" { + // Get the Admin Secret + adminSecret := &corev1.Secret{} + err := kubeClient.Get(context.TODO(), types.NamespacedName{ + Namespace: dbcs.GetNamespace(), + Name: dbcs.Spec.DbSystem.DbAdminPasswordSecret, + }, adminSecret) + + if err != nil { + return "", err + } + + // Get the admin password + key := "admin-password" + if val, ok := adminSecret.Data[key]; ok { + return strings.TrimSpace(string(val)), nil + } else { + msg := "secret item not found: admin-password" + return "", errors.New(msg) + } + } + return "", errors.New("should provide 
either a Secret name or a Valut Secret ID") +} + +// Get admin password from Secret then OCI valut secret +func GetTdePassword(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { if dbcs.Spec.DbSystem.TdeWalletPasswordSecret != "" { // Get the Admin Secret tdeSecret := &corev1.Secret{} @@ -180,7 +646,7 @@ func GetTdePassword(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem) // Get the admin password key := "tde-password" if val, ok := tdeSecret.Data[key]; ok { - return string(val), nil + return strings.TrimSpace(string(val)), nil } else { msg := "secret item not found: tde-password" return "", errors.New(msg) @@ -190,7 +656,33 @@ func GetTdePassword(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem) } // Get admin password from Secret then OCI valut secret -func getPublicSSHKey(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem) (string, error) { +func GetCloningTdePassword(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { + if dbcs.Spec.DbClone.TdeWalletPasswordSecret != "" { + // Get the Admin Secret + tdeSecret := &corev1.Secret{} + err := kubeClient.Get(context.TODO(), types.NamespacedName{ + Namespace: dbcs.GetNamespace(), + Name: dbcs.Spec.DbClone.TdeWalletPasswordSecret, + }, tdeSecret) + + if err != nil { + return "", err + } + + // Get the admin password + key := "tde-password" + if val, ok := tdeSecret.Data[key]; ok { + return strings.TrimSpace(string(val)), nil + } else { + msg := "secret item not found: tde-password" + return "", errors.New(msg) + } + } + return "", errors.New("should provide either a Secret name or a Valut Secret ID") +} + +// Get admin password from Secret then OCI valut secret +func getPublicSSHKey(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { if dbcs.Spec.DbSystem.SshPublicKeys[0] != "" { // Get the Admin Secret sshkeysecret := &corev1.Secret{} @@ -215,6 +707,32 @@ func getPublicSSHKey(kubeClient client.Client, dbcs 
*databasev1alpha1.DbcsSystem return "", errors.New("should provide either a Secret name or a Valut Secret ID") } +// Get admin password from Secret then OCI valut secret +func getCloningPublicSSHKey(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (string, error) { + if dbcs.Spec.DbClone.SshPublicKeys[0] != "" { + // Get the Admin Secret + sshkeysecret := &corev1.Secret{} + err := kubeClient.Get(context.TODO(), types.NamespacedName{ + Namespace: dbcs.GetNamespace(), + Name: dbcs.Spec.DbClone.SshPublicKeys[0], + }, sshkeysecret) + + if err != nil { + return "", err + } + + // Get the admin password` + key := "publickey" + if val, ok := sshkeysecret.Data[key]; ok { + return string(val), nil + } else { + msg := "secret item not found: " + return "", errors.New(msg) + } + } + return "", errors.New("should provide either a Secret name or a Valut Secret ID") +} + // Delete DbcsSystem System func DeleteDbcsSystemSystem(dbClient database.DatabaseClient, Id string) error { @@ -233,24 +751,142 @@ func DeleteDbcsSystemSystem(dbClient database.DatabaseClient, Id string) error { } // SetLifecycleState set status.state of the reosurce. 
-func SetLifecycleState(kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, state databasev1alpha1.LifecycleState, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - dbcs.Status.State = state - // Set the status +func SetLifecycleState(kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, state databasev4.LifecycleState, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { + maxRetries := 5 + retryDelay := time.Second * 2 + + for attempt := 0; attempt < maxRetries; attempt++ { + // Fetch the latest version of the object + latestInstance := &databasev4.DbcsSystem{} + err := kubeClient.Get(context.TODO(), client.ObjectKeyFromObject(dbcs), latestInstance) + if err != nil { + // Log and return error if fetching the latest version fails + return fmt.Errorf("failed to fetch the latest version of DBCS instance: %w", err) + } + + // Merge the instance fields into latestInstance + err = mergeInstancesFromLatest(dbcs, latestInstance) + if err != nil { + return fmt.Errorf("failed to merge instances: %w", err) + } + + // Set the status using the dbcs object if statusErr := SetDBCSStatus(dbClient, dbcs, nwClient, wrClient); statusErr != nil { return statusErr } - if err := kubeClient.Status().Update(context.TODO(), dbcs); err != nil { - return err + + // Update the ResourceVersion of dbcs from latestInstance to avoid conflict + dbcs.ResourceVersion = latestInstance.ResourceVersion + + // Attempt to patch the status of the instance + err = kubeClient.Status().Patch(context.TODO(), dbcs, client.MergeFrom(latestInstance)) + if err != nil { + if apierrors.IsConflict(err) { + // Handle the conflict and retry + time.Sleep(retryDelay) + continue + } + // For other errors, log and return the error + return fmt.Errorf("failed to update the DBCS instance status: %w", err) } - 
return nil - }) + // If no error, break the loop + break + } + + return nil +} +func mergeInstancesFromLatest(instance, latestInstance *databasev4.DbcsSystem) error { + instanceVal := reflect.ValueOf(instance).Elem() + latestVal := reflect.ValueOf(latestInstance).Elem() + + // Fields to exclude from merging + excludeFields := map[string]bool{ + "ReleaseUpdate": true, + "AsmStorageStatus": true, + } + + // Loop through the fields in instance + for i := 0; i < instanceVal.NumField(); i++ { + field := instanceVal.Type().Field(i) + instanceField := instanceVal.Field(i) + latestField := latestVal.FieldByName(field.Name) + + // Skip unexported fields + if !isExported(field) { + continue + } + + // Ensure latestField is valid + if !latestField.IsValid() || !instanceField.CanSet() { + continue + } + + // Skip fields that are in the exclusion list + if excludeFields[field.Name] { + continue + } + + // Handle pointer fields + if latestField.Kind() == reflect.Ptr { + if !latestField.IsNil() && instanceField.IsNil() { + // If instance's field is nil and latest's field is not nil, set the latest's field value + instanceField.Set(latestField) + } + // If instance's field is not nil, do not overwrite + } else if latestField.Kind() == reflect.String { + if latestField.String() != "" && latestField.String() != "NOT_DEFINED" && instanceField.String() == "" { + // If latest's string field is non-empty and not "NOT_DEFINED", and instance's string field is empty, set the value + instanceField.Set(latestField) + } + } else if latestField.Kind() == reflect.Struct { + // Handle struct types recursively + mergeStructFields(instanceField, latestField) + } else { + // Handle other types if instance's field is zero value + if reflect.DeepEqual(instanceField.Interface(), reflect.Zero(instanceField.Type()).Interface()) { + instanceField.Set(latestField) + } + } + } + return nil +} + +func mergeStructFields(instanceField, latestField reflect.Value) { + for i := 0; i < instanceField.NumField(); 
i++ { + subField := instanceField.Type().Field(i) + instanceSubField := instanceField.Field(i) + latestSubField := latestField.Field(i) + + if !isExported(subField) || !instanceSubField.CanSet() { + continue + } + + if latestSubField.Kind() == reflect.Ptr { + if !latestSubField.IsNil() && instanceSubField.IsNil() { + instanceSubField.Set(latestSubField) + } + } else if latestSubField.Kind() == reflect.String { + if latestSubField.String() != "" && latestSubField.String() != "NOT_DEFINED" && instanceSubField.String() == "" { + instanceSubField.Set(latestSubField) + } + } else if latestSubField.Kind() == reflect.Struct { + mergeStructFields(instanceSubField, latestSubField) + } else { + if reflect.DeepEqual(instanceSubField.Interface(), reflect.Zero(instanceSubField.Type()).Interface()) { + instanceSubField.Set(latestSubField) + } + } + } +} + +func isExported(field reflect.StructField) bool { + return field.PkgPath == "" } // SetDBCSSystem LifeCycle state when state is provisioning -func SetDBCSDatabaseLifecycleState(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { +func SetDBCSDatabaseLifecycleState(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { dbcsId := *dbcs.Spec.Id @@ -266,47 +902,47 @@ func SetDBCSDatabaseLifecycleState(logger logr.Logger, kubeClient client.Client, // Return if the desired lifecycle state is the same as the current lifecycle state if string(dbcs.Status.State) == string(resp.LifecycleState) { return nil - } else if string(resp.LifecycleState) == string(databasev1alpha1.Available) { + } else if string(resp.LifecycleState) == string(databasev4.Available) { // Change the phase to "Available" - if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, 
databasev1alpha1.Available, nwClient, wrClient); statusErr != nil { + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Available, nwClient, wrClient); statusErr != nil { return statusErr } - } else if string(resp.LifecycleState) == string(databasev1alpha1.Provision) { + } else if string(resp.LifecycleState) == string(databasev4.Provision) { // Change the phase to "Provisioning" - if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev1alpha1.Provision, nwClient, wrClient); statusErr != nil { + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Provision, nwClient, wrClient); statusErr != nil { return statusErr } // Check the State - _, err = CheckResourceState(logger, dbClient, *resp.DbSystem.Id, string(databasev1alpha1.Provision), string(databasev1alpha1.Available)) + _, err = CheckResourceState(logger, dbClient, *resp.DbSystem.Id, string(databasev4.Provision), string(databasev4.Available)) if err != nil { return err } - } else if string(resp.LifecycleState) == string(databasev1alpha1.Update) { + } else if string(resp.LifecycleState) == string(databasev4.Update) { // Change the phase to "Updating" - if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev1alpha1.Update, nwClient, wrClient); statusErr != nil { + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Update, nwClient, wrClient); statusErr != nil { return statusErr } // Check the State - _, err = CheckResourceState(logger, dbClient, *resp.DbSystem.Id, string(databasev1alpha1.Update), string(databasev1alpha1.Available)) + _, err = CheckResourceState(logger, dbClient, *resp.DbSystem.Id, string(databasev4.Update), string(databasev4.Available)) if err != nil { return err } - } else if string(resp.LifecycleState) == string(databasev1alpha1.Failed) { + } else if string(resp.LifecycleState) == string(databasev4.Failed) { // Change the phase to "Updating" - if statusErr := SetLifecycleState(kubeClient, dbClient, 
dbcs, databasev1alpha1.Failed, nwClient, wrClient); statusErr != nil { + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Failed, nwClient, wrClient); statusErr != nil { return statusErr } return fmt.Errorf("DbSystem is in Failed State") - } else if string(resp.LifecycleState) == string(databasev1alpha1.Terminated) { + } else if string(resp.LifecycleState) == string(databasev4.Terminated) { // Change the phase to "Terminated" - if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev1alpha1.Terminate, nwClient, wrClient); statusErr != nil { + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Terminate, nwClient, wrClient); statusErr != nil { return statusErr } } return nil } -func GetDbSystemId(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem) error { +func GetDbSystemId(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) error { dbcsId := *dbcs.Spec.Id dbcsReq := database.GetDbSystemRequest{ @@ -353,7 +989,9 @@ func GetDbSystemId(logger logr.Logger, dbClient database.DatabaseClient, dbcs *d } dbcs.Spec.DbSystem.SubnetId = *response.SubnetId dbcs.Spec.DbSystem.AvailabilityDomain = *response.AvailabilityDomain - + if response.KmsKeyId != nil { + dbcs.Status.KMSDetailsStatus.KeyId = *response.KmsKeyId + } err = PopulateDBDetails(logger, dbClient, dbcs) if err != nil { logger.Info("Error Occurred while collecting the DB details") @@ -362,7 +1000,7 @@ func GetDbSystemId(logger logr.Logger, dbClient database.DatabaseClient, dbcs *d return nil } -func PopulateDBDetails(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem) error { +func PopulateDBDetails(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) error { listDbHomeRsp, err := GetListDbHomeRsp(logger, dbClient, dbcs) if err != nil { @@ -383,7 +1021,7 @@ func PopulateDBDetails(logger logr.Logger, dbClient 
database.DatabaseClient, dbc return nil } -func GetListDbHomeRsp(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem) (database.ListDbHomesResponse, error) { +func GetListDbHomeRsp(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) (database.ListDbHomesResponse, error) { dbcsId := *dbcs.Spec.Id CompartmentId := dbcs.Spec.DbSystem.CompartmentId @@ -401,7 +1039,7 @@ func GetListDbHomeRsp(logger logr.Logger, dbClient database.DatabaseClient, dbcs return response, nil } -func GetListDatabaseRsp(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, dbHomeId string) (database.ListDatabasesResponse, error) { +func GetListDatabaseRsp(logger logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, dbHomeId string) (database.ListDatabasesResponse, error) { CompartmentId := dbcs.Spec.DbSystem.CompartmentId @@ -418,36 +1056,136 @@ func GetListDatabaseRsp(logger logr.Logger, dbClient database.DatabaseClient, db return response, nil } -func UpdateDbcsSystemIdInst(log logr.Logger, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, kubeClient client.Client, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { - //logger := log.WithName("UpdateDbcsSystemInstance") - +func UpdateDbcsSystemIdInst(log logr.Logger, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, kubeClient client.Client, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient, databaseID string) error { + // log.Info("Existing DB System Getting Updated with new details in UpdateDbcsSystemIdInst") + var err error updateFlag := false updateDbcsDetails := database.UpdateDbSystemDetails{} - oldSpec, err := dbcs.GetLastSuccessfulSpec() + // log.Info("Current annotations", "annotations", dbcs.GetAnnotations()) + oldSpec, err := dbcs.GetLastSuccessfulSpecWithLog(log) // Use the new method if err != nil { + log.Error(err, 
"Failed to get last successful spec") return err } - if dbcs.Spec.DbSystem.CpuCoreCount > 0 && dbcs.Spec.DbSystem.CpuCoreCount != oldSpec.DbSystem.CpuCoreCount { + if oldSpec == nil { + log.Info("oldSpec is nil") + } else { + log.Info("Details of oldSpec", "oldSpec", oldSpec) + } + log.Info("Details of updateFlag -> " + fmt.Sprint(updateFlag)) + + if dbcs.Spec.DbSystem.CpuCoreCount > 0 && ((dbcs.Spec.DbSystem.CpuCoreCount != oldSpec.DbSystem.CpuCoreCount) || (dbcs.Spec.DbSystem.CpuCoreCount != *&dbcs.Status.CpuCoreCount)) { + log.Info("DB System cpu core count is: " + fmt.Sprint(dbcs.Spec.DbSystem.CpuCoreCount) + " DB System old cpu count is: " + fmt.Sprint(oldSpec.DbSystem.CpuCoreCount)) updateDbcsDetails.CpuCoreCount = common.Int(dbcs.Spec.DbSystem.CpuCoreCount) updateFlag = true } - if dbcs.Spec.DbSystem.Shape != "" && dbcs.Spec.DbSystem.Shape != oldSpec.DbSystem.Shape { + if dbcs.Spec.DbSystem.Shape != "" && ((dbcs.Spec.DbSystem.Shape != oldSpec.DbSystem.Shape) || (dbcs.Spec.DbSystem.Shape != *dbcs.Status.Shape)) { + // log.Info("DB System desired shape is :" + string(dbcs.Spec.DbSystem.Shape) + "DB System old shape is " + string(oldSpec.DbSystem.Shape)) updateDbcsDetails.Shape = common.String(dbcs.Spec.DbSystem.Shape) updateFlag = true } - if dbcs.Spec.DbSystem.LicenseModel != "" && dbcs.Spec.DbSystem.LicenseModel != oldSpec.DbSystem.LicenseModel { + if dbcs.Spec.DbSystem.LicenseModel != "" && ((dbcs.Spec.DbSystem.LicenseModel != oldSpec.DbSystem.LicenseModel) || (dbcs.Spec.DbSystem.LicenseModel != *&dbcs.Status.LicenseModel)) { licenceModel := getLicenceModel(dbcs) + // log.Info("DB System desired License Model is :" + string(dbcs.Spec.DbSystem.LicenseModel) + "DB Sytsem old License Model is " + string(oldSpec.DbSystem.LicenseModel)) updateDbcsDetails.LicenseModel = database.UpdateDbSystemDetailsLicenseModelEnum(licenceModel) updateFlag = true } if dbcs.Spec.DbSystem.InitialDataStorageSizeInGB != 0 && dbcs.Spec.DbSystem.InitialDataStorageSizeInGB != 
oldSpec.DbSystem.InitialDataStorageSizeInGB { + // log.Info("DB System desired Storage Size is :" + fmt.Sprint(dbcs.Spec.DbSystem.InitialDataStorageSizeInGB) + "DB System old Storage Size is " + fmt.Sprint(oldSpec.DbSystem.InitialDataStorageSizeInGB)) updateDbcsDetails.DataStorageSizeInGBs = &dbcs.Spec.DbSystem.InitialDataStorageSizeInGB updateFlag = true } + // // Check and update KMS details if necessary + if (dbcs.Spec.KMSConfig != databasev4.KMSConfig{}) { + if dbcs.Spec.KMSConfig != oldSpec.DbSystem.KMSConfig { + log.Info("Updating KMS details in Existing Database") + + kmsKeyID := dbcs.Status.KMSDetailsStatus.KeyId + vaultID := dbcs.Status.KMSDetailsStatus.VaultId + tdeWalletPassword := "" + if dbcs.Spec.DbSystem.TdeWalletPasswordSecret != "" { + tdeWalletPassword, err = GetTdePassword(kubeClient, dbcs) + if err != nil { + log.Error(err, "Failed to get TDE wallet password") + } + } else { + log.Info("Its mandatory to define Tde wallet password when KMS Vault is defined. Not updating existing database") + return nil + } + dbAdminPassword := "" + if dbcs.Spec.DbSystem.DbAdminPasswordSecret != "" { + dbAdminPassword, err = GetAdminPassword(kubeClient, dbcs) + if err != nil { + log.Error(err, "Failed to get DB Admin password") + } + } + + // Assign all available fields to KMSConfig + dbcs.Spec.DbSystem.KMSConfig = databasev4.KMSConfig{ + VaultName: dbcs.Spec.KMSConfig.VaultName, + CompartmentId: dbcs.Spec.KMSConfig.CompartmentId, + KeyName: dbcs.Spec.KMSConfig.KeyName, + EncryptionAlgo: dbcs.Spec.KMSConfig.EncryptionAlgo, + VaultType: dbcs.Spec.KMSConfig.VaultType, + } + + // Create the migrate vault key request + migrateRequest := database.MigrateVaultKeyRequest{ + DatabaseId: common.String(databaseID), + MigrateVaultKeyDetails: database.MigrateVaultKeyDetails{ + KmsKeyId: common.String(kmsKeyID), + VaultId: common.String(vaultID), + }, + } + if tdeWalletPassword != "" { + migrateRequest.TdeWalletPassword = common.String(tdeWalletPassword) + } + if 
dbAdminPassword != "" { + migrateRequest.AdminPassword = common.String(dbAdminPassword) + } + // Change the phase to "Updating" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Update, nwClient, wrClient); statusErr != nil { + return statusErr + } + // Send the request + migrateResponse, err := dbClient.MigrateVaultKey(context.TODO(), migrateRequest) + if err != nil { + log.Error(err, "Failed to migrate vault key") + return err + } + + // // Check for additional response details (if any) + if migrateResponse.RawResponse.StatusCode != 200 { + log.Error(fmt.Errorf("unexpected status code"), "Migrate vault key request failed", "StatusCode", migrateResponse.RawResponse.StatusCode) + return fmt.Errorf("MigrateVaultKey request failed with status code %d", migrateResponse.RawResponse.StatusCode) + } + + log.Info("MigrateVaultKey request succeeded, waiting for database to reach the desired state") + + // // Wait for the database to reach the desired state after migration, timeout for 2 hours + // Define timeout and check interval + timeout := 2 * time.Hour + checkInterval := 1 * time.Minute + + err = WaitForDatabaseState(log, dbClient, databaseID, "AVAILABLE", timeout, checkInterval) + if err != nil { + log.Error(err, "Database did not reach the desired state within the timeout period") + return err + } + // Change the phase to "Available" + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Available, nwClient, wrClient); statusErr != nil { + return statusErr + } + + log.Info("KMS migration process completed successfully") + } + } + + log.Info("Details of updateFlag after validations is " + fmt.Sprint(updateFlag)) if updateFlag { updateDbcsRequest := database.UpdateDbSystemRequest{ DbSystemId: common.String(*dbcs.Spec.Id), @@ -459,7 +1197,7 @@ func UpdateDbcsSystemIdInst(log logr.Logger, dbClient database.DatabaseClient, d } // Change the phase to "Provisioning" - if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, 
databasev1alpha1.Update, nwClient, wrClient); statusErr != nil { + if statusErr := SetLifecycleState(kubeClient, dbClient, dbcs, databasev4.Update, nwClient, wrClient); statusErr != nil { return statusErr } // Check the State @@ -467,13 +1205,58 @@ func UpdateDbcsSystemIdInst(log logr.Logger, dbClient database.DatabaseClient, d if err != nil { return err } - } return nil } -func UpdateDbcsSystemId(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem) error { +func WaitForDatabaseState( + log logr.Logger, + dbClient database.DatabaseClient, + databaseId string, + desiredState database.DbHomeLifecycleStateEnum, + timeout time.Duration, + checkInterval time.Duration, +) error { + // Set a deadline for the timeout + deadline := time.Now().Add(timeout) + + log.Info("Starting to wait for the database to reach the desired state", "DatabaseID", databaseId, "DesiredState", desiredState, "Timeout", timeout) + + for time.Now().Before(deadline) { + // Prepare the request to fetch database details + getDatabaseReq := database.GetDatabaseRequest{ + DatabaseId: &databaseId, + } + + // Fetch database details + databaseResp, err := dbClient.GetDatabase(context.TODO(), getDatabaseReq) + if err != nil { + log.Error(err, "Failed to get database details", "DatabaseID", databaseId) + return err + } + + // Log the current database state + log.Info("Database State", "DatabaseID", databaseId, "CurrentState", databaseResp.LifecycleState) + + // Check if the database has reached the desired state + if databaseResp.LifecycleState == database.DatabaseLifecycleStateEnum(desiredState) { + log.Info("Database reached the desired state", "DatabaseID", databaseId, "State", desiredState) + return nil + } + + // Wait for the specified interval before checking again + log.Info("Database not in the desired state yet, waiting...", "DatabaseID", databaseId, "CurrentState", databaseResp.LifecycleState, "DesiredState", desiredState, "NextCheckIn", checkInterval) + time.Sleep(checkInterval) + } + + // 
Return an error if the timeout is reached + err := fmt.Errorf("timed out waiting for database to reach the desired state: %s", desiredState) + log.Error(err, "Timeout reached while waiting for the database to reach the desired state", "DatabaseID", databaseId) + return err +} + +func UpdateDbcsSystemId(kubeClient client.Client, dbcs *databasev4.DbcsSystem) error { payload := []annotations.PatchValue{{ Op: "replace", Path: "/spec/details", @@ -491,7 +1274,6 @@ func UpdateDbcsSystemId(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSys func CheckResourceState(logger logr.Logger, dbClient database.DatabaseClient, Id string, currentState string, expectedState string) (string, error) { // The database OCID is not available when the provisioning is onging. // Retry until the new DbcsSystem is ready. - // Retry up to 18 times every 10 seconds. var state string var err error @@ -534,7 +1316,12 @@ func GetResourceState(logger logr.Logger, dbClient database.DatabaseClient, Id s return state, nil } -func SetDBCSStatus(dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { +func SetDBCSStatus(dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, wrClient workrequests.WorkRequestClient) error { + + if dbcs.Spec.Id == nil { + dbcs.Status.State = "FAILED" + return nil + } if dbcs.Spec.Id == nil { dbcs.Status.State = "FAILED" @@ -571,6 +1358,15 @@ func SetDBCSStatus(dbClient database.DatabaseClient, dbcs *databasev1alpha1.Dbcs dbcs.Status.Network.ListenerPort = resp.ListenerPort dbcs.Status.Network.HostName = *resp.Hostname dbcs.Status.Network.DomainName = *resp.Domain + if dbcs.Spec.KMSConfig.CompartmentId != "" { + dbcs.Status.KMSDetailsStatus.CompartmentId = dbcs.Spec.KMSConfig.CompartmentId + dbcs.Status.KMSDetailsStatus.VaultName = dbcs.Spec.KMSConfig.VaultName + } + dbcs.Status.State = 
databasev4.LifecycleState(resp.LifecycleState) + if dbcs.Spec.KMSConfig.CompartmentId != "" { + dbcs.Status.KMSDetailsStatus.CompartmentId = dbcs.Spec.KMSConfig.CompartmentId + dbcs.Status.KMSDetailsStatus.VaultName = dbcs.Spec.KMSConfig.VaultName + } sname, vcnId, err := getSubnetName(*resp.SubnetId, nwClient) @@ -585,7 +1381,7 @@ func SetDBCSStatus(dbClient database.DatabaseClient, dbcs *databasev1alpha1.Dbcs } // Work Request Ststaus - dbWorkRequest := databasev1alpha1.DbWorkrequests{} + dbWorkRequest := databasev4.DbWorkrequests{} dbWorks, err := getWorkRequest(*resp.OpcRequestId, wrClient, dbcs) if err == nil { @@ -605,11 +1401,11 @@ func SetDBCSStatus(dbClient database.DatabaseClient, dbcs *databasev1alpha1.Dbcs dbWorkRequest.TimeStarted = dbWork.TimeStarted.String() } - if dbWorkRequest != (databasev1alpha1.DbWorkrequests{}) { + if dbWorkRequest != (databasev4.DbWorkrequests{}) { status := checkValue(dbcs, dbWork.Id) if status == 0 { dbcs.Status.WorkRequests = append(dbcs.Status.WorkRequests, dbWorkRequest) - dbWorkRequest = databasev1alpha1.DbWorkrequests{} + dbWorkRequest = databasev4.DbWorkrequests{} } else { setValue(dbcs, dbWorkRequest) } @@ -620,7 +1416,7 @@ func SetDBCSStatus(dbClient database.DatabaseClient, dbcs *databasev1alpha1.Dbcs // DB Home Status dbcs.Status.DbInfo = dbcs.Status.DbInfo[:0] - dbStatus := databasev1alpha1.DbStatus{} + dbStatus := databasev4.DbStatus{} dbHomes, err := getDbHomeList(dbClient, dbcs) @@ -636,14 +1432,14 @@ func SetDBCSStatus(dbClient database.DatabaseClient, dbcs *databasev1alpha1.Dbcs dbStatus.DbWorkload = *dbDetail.DbWorkload } dbcs.Status.DbInfo = append(dbcs.Status.DbInfo, dbStatus) - dbStatus = databasev1alpha1.DbStatus{} + dbStatus = databasev4.DbStatus{} } } } return nil } -func getDbHomeList(dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem) ([]database.DbHomeSummary, error) { +func getDbHomeList(dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) ([]database.DbHomeSummary, 
error) { var items []database.DbHomeSummary dbcsId := *dbcs.Spec.Id @@ -661,7 +1457,7 @@ func getDbHomeList(dbClient database.DatabaseClient, dbcs *databasev1alpha1.Dbcs return resp.Items, nil } -func getDList(dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, dbHomeId *string) ([]database.DatabaseSummary, error) { +func getDList(dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, dbHomeId *string) ([]database.DatabaseSummary, error) { dbcsId := *dbcs.Spec.Id var items []database.DatabaseSummary @@ -710,7 +1506,7 @@ func getVcnName(vcnId *string, nwClient core.VirtualNetworkClient) (*string, err } // =========== validate Specs ============ -func ValidateSpex(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, nwClient core.VirtualNetworkClient, eRecord record.EventRecorder) error { +func ValidateSpex(logger logr.Logger, kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, nwClient core.VirtualNetworkClient, eRecord record.EventRecorder) error { //var str1 string var eventMsg string diff --git a/commons/dbcssystem/dcommon.go b/commons/dbcssystem/dcommon.go index 3af3a6b4..beaa7c38 100644 --- a/commons/dbcssystem/dcommon.go +++ b/commons/dbcssystem/dcommon.go @@ -49,10 +49,10 @@ import ( "github.com/oracle/oci-go-sdk/v65/workrequests" "sigs.k8s.io/controller-runtime/pkg/client" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" ) -func GetDbHomeDetails(kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem) (database.CreateDbHomeDetails, error) { +func GetDbHomeDetails(kubeClient client.Client, dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem) (database.CreateDbHomeDetails, error) { dbHomeDetails := database.CreateDbHomeDetails{} @@ -72,7 +72,7 @@ func GetDbHomeDetails(kubeClient 
client.Client, dbClient database.DatabaseClient return dbHomeDetails, nil } -func GetDbLatestVersion(dbClient database.DatabaseClient, dbcs *databasev1alpha1.DbcsSystem, dbSystemId string) (string, error) { +func GetDbLatestVersion(dbClient database.DatabaseClient, dbcs *databasev4.DbcsSystem, dbSystemId string) (string, error) { //var provisionedDbcsSystemId string ctx := context.TODO() @@ -105,27 +105,34 @@ func GetDbLatestVersion(dbClient database.DatabaseClient, dbcs *databasev1alpha1 s2 := getStr(dbcs.Spec.DbSystem.DbVersion, 2) if strings.EqualFold(s1, s2) { val, _ = strconv.Atoi(s1) - if val >= 18 { + if val >= 18 && val <= 21 { s3 := s1 + "c" if strings.EqualFold(s3, dbcs.Spec.DbSystem.DbVersion) { sFlag = 1 break } + } else if val >= 23 { + s3 := s1 + "ai" + if strings.EqualFold(s3, dbcs.Spec.DbSystem.DbVersion) { + sFlag = 1 + break + } + } else if val < 18 && val >= 11 { + s4 := getStr(*version.Version, 4) + if strings.EqualFold(s4, dbcs.Spec.DbSystem.DbVersion) { + sFlag = 1 + break + } } - } else if val < 18 && val >= 11 { - s4 := getStr(*version.Version, 4) - if strings.EqualFold(s4, dbcs.Spec.DbSystem.DbVersion) { - sFlag = 1 - break - } - } + } } } if sFlag == 1 { return *version.Version, nil } + return *version.Version, fmt.Errorf("no database version matched") } @@ -133,7 +140,7 @@ func getStr(str1 string, num int) string { return str1[0:num] } -func GetDBDetails(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem) (database.CreateDatabaseDetails, error) { +func GetDBDetails(kubeClient client.Client, dbcs *databasev4.DbcsSystem) (database.CreateDatabaseDetails, error) { dbDetails := database.CreateDatabaseDetails{} var val database.CreateDatabaseDetailsDbWorkloadEnum @@ -188,7 +195,7 @@ func GetDBDetails(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem) ( return dbDetails, nil } -func getBackupConfig(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem) (database.DbBackupConfig, error) { +func getBackupConfig(kubeClient 
client.Client, dbcs *databasev4.DbcsSystem) (database.DbBackupConfig, error) { backupConfig := database.DbBackupConfig{} if dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupEnabled != nil { @@ -216,7 +223,7 @@ func getBackupConfig(kubeClient client.Client, dbcs *databasev1alpha1.DbcsSystem return backupConfig, nil } -func getBackupWindowEnum(dbcs *databasev1alpha1.DbcsSystem) (database.DbBackupConfigAutoBackupWindowEnum, error) { +func getBackupWindowEnum(dbcs *databasev4.DbcsSystem) (database.DbBackupConfigAutoBackupWindowEnum, error) { if strings.ToUpper(*dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) == "SLOT_ONE" { return database.DbBackupConfigAutoBackupWindowOne, nil @@ -251,7 +258,7 @@ func getBackupWindowEnum(dbcs *databasev1alpha1.DbcsSystem) (database.DbBackupCo //return database.DbBackupConfigAutoBackupWindowEight, fmt.Errorf("AutoBackupWindow values can be SLOT_ONE|SLOT_TWO|SLOT_THREE|SLOT_FOUR|SLOT_FIVE|SLOT_SIX|SLOT_SEVEN|SLOT_EIGHT|SLOT_NINE|SLOT_TEN|SLOT_ELEVEN|SLOT_TWELEVE. 
The current value set to " + *dbcs.Spec.DbSystem.DbBackupConfig.AutoBackupWindow) } -func getRecoveryWindowsInDays(dbcs *databasev1alpha1.DbcsSystem) (int, error) { +func getRecoveryWindowsInDays(dbcs *databasev4.DbcsSystem) (int, error) { var days int @@ -274,7 +281,7 @@ func getRecoveryWindowsInDays(dbcs *databasev1alpha1.DbcsSystem) (int, error) { } func GetDBSystemopts( - dbcs *databasev1alpha1.DbcsSystem) database.DbSystemOptions { + dbcs *databasev4.DbcsSystem) database.DbSystemOptions { dbSystemOpt := database.DbSystemOptions{} @@ -294,7 +301,7 @@ func GetDBSystemopts( return dbSystemOpt } -func getLicenceModel(dbcs *databasev1alpha1.DbcsSystem) database.DbSystemLicenseModelEnum { +func getLicenceModel(dbcs *databasev4.DbcsSystem) database.DbSystemLicenseModelEnum { if dbcs.Spec.DbSystem.LicenseModel == "BRING_YOUR_OWN_LICENSE" { return database.DbSystemLicenseModelBringYourOwnLicense @@ -302,7 +309,7 @@ func getLicenceModel(dbcs *databasev1alpha1.DbcsSystem) database.DbSystemLicense return database.DbSystemLicenseModelLicenseIncluded } -func getDbWorkLoadType(dbcs *databasev1alpha1.DbcsSystem) (database.CreateDatabaseDetailsDbWorkloadEnum, error) { +func getDbWorkLoadType(dbcs *databasev4.DbcsSystem) (database.CreateDatabaseDetailsDbWorkloadEnum, error) { if strings.ToUpper(dbcs.Spec.DbSystem.DbWorkload) == "OLTP" { @@ -317,7 +324,7 @@ func getDbWorkLoadType(dbcs *databasev1alpha1.DbcsSystem) (database.CreateDataba } func GetNodeCount( - dbcs *databasev1alpha1.DbcsSystem) int { + dbcs *databasev4.DbcsSystem) int { if dbcs.Spec.DbSystem.NodeCount != nil { return *dbcs.Spec.DbSystem.NodeCount @@ -327,7 +334,7 @@ func GetNodeCount( } func GetInitialStorage( - dbcs *databasev1alpha1.DbcsSystem) int { + dbcs *databasev4.DbcsSystem) int { if dbcs.Spec.DbSystem.InitialDataStorageSizeInGB > 0 { return dbcs.Spec.DbSystem.InitialDataStorageSizeInGB @@ -335,7 +342,7 @@ func GetInitialStorage( return 256 } -func GetDBEdition(dbcs *databasev1alpha1.DbcsSystem) 
database.LaunchDbSystemDetailsDatabaseEditionEnum { +func GetDBEdition(dbcs *databasev4.DbcsSystem) database.LaunchDbSystemDetailsDatabaseEditionEnum { if dbcs.Spec.DbSystem.ClusterName != "" { return database.LaunchDbSystemDetailsDatabaseEditionEnterpriseEditionExtremePerformance @@ -360,7 +367,7 @@ func GetDBEdition(dbcs *databasev1alpha1.DbcsSystem) database.LaunchDbSystemDeta } func GetDBbDiskRedundancy( - dbcs *databasev1alpha1.DbcsSystem) database.LaunchDbSystemDetailsDiskRedundancyEnum { + dbcs *databasev4.DbcsSystem) database.LaunchDbSystemDetailsDiskRedundancyEnum { if dbcs.Spec.DbSystem.ClusterName != "" { return database.LaunchDbSystemDetailsDiskRedundancyHigh @@ -376,7 +383,7 @@ func GetDBbDiskRedundancy( return database.LaunchDbSystemDetailsDiskRedundancyNormal } -func getWorkRequest(workId string, wrClient workrequests.WorkRequestClient, dbcs *databasev1alpha1.DbcsSystem) ([]workrequests.WorkRequestSummary, error) { +func getWorkRequest(workId string, wrClient workrequests.WorkRequestClient, dbcs *databasev4.DbcsSystem) ([]workrequests.WorkRequestSummary, error) { var workReq []workrequests.WorkRequestSummary req := workrequests.ListWorkRequestsRequest{CompartmentId: &dbcs.Spec.DbSystem.CompartmentId, OpcRequestId: &workId, ResourceId: dbcs.Spec.Id} @@ -405,10 +412,10 @@ func GetFmtStr(pstr string) string { return "[" + pstr + "]" } -func checkValue(dbcs *databasev1alpha1.DbcsSystem, workId *string) int { +func checkValue(dbcs *databasev4.DbcsSystem, workId *string) int { var status int = 0 - //dbWorkRequest := databasev1alpha1.DbWorkrequests{} + //dbWorkRequest := databasev4.DbWorkrequests{} if len(dbcs.Status.WorkRequests) > 0 { for _, v := range dbcs.Status.WorkRequests { @@ -420,10 +427,10 @@ func checkValue(dbcs *databasev1alpha1.DbcsSystem, workId *string) int { return status } -func setValue(dbcs *databasev1alpha1.DbcsSystem, dbWorkRequest databasev1alpha1.DbWorkrequests) { +func setValue(dbcs *databasev4.DbcsSystem, dbWorkRequest 
databasev4.DbWorkrequests) { //var status int = 1 - //dbWorkRequest := databasev1alpha1.DbWorkrequests{} + //dbWorkRequest := databasev4.DbWorkrequests{} var counter int = 0 if len(dbcs.Status.WorkRequests) > 0 { for _, v := range dbcs.Status.WorkRequests { diff --git a/commons/k8s/create.go b/commons/k8s/create.go index 5055bc0e..cd836af7 100644 --- a/commons/k8s/create.go +++ b/commons/k8s/create.go @@ -41,7 +41,7 @@ package k8s import ( "context" - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/database" @@ -78,25 +78,28 @@ func CreateSecret(kubeClient client.Client, namespace string, name string, data func CreateAutonomousBackup(kubeClient client.Client, backupName string, backupSummary database.AutonomousDatabaseBackupSummary, - ownerADB *dbv1alpha1.AutonomousDatabase) error { + ownerAdb *dbv4.AutonomousDatabase) error { - backup := &dbv1alpha1.AutonomousDatabaseBackup{ + backup := &dbv4.AutonomousDatabaseBackup{ ObjectMeta: metav1.ObjectMeta{ - Namespace: ownerADB.GetNamespace(), + Namespace: ownerAdb.GetNamespace(), Name: backupName, - OwnerReferences: NewOwnerReference(ownerADB), + OwnerReferences: NewOwnerReference(ownerAdb), + Labels: map[string]string{ + "adb": ownerAdb.Name, + }, }, - Spec: dbv1alpha1.AutonomousDatabaseBackupSpec{ - Target: dbv1alpha1.TargetSpec{ - K8sADB: dbv1alpha1.K8sADBSpec{ - Name: common.String(ownerADB.Name), + Spec: dbv4.AutonomousDatabaseBackupSpec{ + Target: dbv4.TargetSpec{ + K8sAdb: dbv4.K8sAdbSpec{ + Name: common.String(ownerAdb.Name), }, }, DisplayName: backupSummary.DisplayName, AutonomousDatabaseBackupOCID: backupSummary.Id, - OCIConfig: dbv1alpha1.OCIConfigSpec{ - ConfigMapName: ownerADB.Spec.OCIConfig.ConfigMapName, - SecretName: ownerADB.Spec.OCIConfig.SecretName, + OCIConfig: dbv4.OciConfigSpec{ + ConfigMapName: 
ownerAdb.Spec.OciConfig.ConfigMapName, + SecretName: ownerAdb.Spec.OciConfig.SecretName, }, }, } diff --git a/commons/k8s/fetch.go b/commons/k8s/fetch.go index 05792cad..617abdb5 100644 --- a/commons/k8s/fetch.go +++ b/commons/k8s/fetch.go @@ -44,10 +44,11 @@ import ( corev1 "k8s.io/api/core/v1" apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" ) func FetchResource(kubeClient client.Client, namespace string, name string, object client.Object) error { @@ -64,16 +65,16 @@ func FetchResource(kubeClient client.Client, namespace string, name string, obje // Returns the first AutonomousDatabase resource that matches the AutonomousDatabaseOCID of the backup // Sometimes the AutonomousDatabase doesn't exist. It could happen if a user simply want to restore or -// backup the ADB without creating an ADB rersource in the cluster. +// backup the AutonomousDatabase without creating an AutonomousDatabase resource in the cluster. // If there isn't an AutonomousDatabase with the same OCID, a nil is returned. 
-func FetchAutonomousDatabaseWithOCID(kubeClient client.Client, namespace string, ocid string) (*dbv1alpha1.AutonomousDatabase, error) { +func FetchAutonomousDatabaseWithOCID(kubeClient client.Client, namespace string, ocid string) (*dbv4.AutonomousDatabase, error) { adbList, err := fetchAutonomousDatabases(kubeClient, namespace) if err != nil { return nil, err } for _, adb := range adbList.Items { - if adb.Spec.Details.AutonomousDatabaseOCID != nil && *adb.Spec.Details.AutonomousDatabaseOCID == ocid { + if adb.Spec.Details.Id != nil && *adb.Spec.Details.Id == ocid { return &adb, nil } } @@ -81,9 +82,9 @@ func FetchAutonomousDatabaseWithOCID(kubeClient client.Client, namespace string, return nil, nil } -func fetchAutonomousDatabases(kubeClient client.Client, namespace string) (*dbv1alpha1.AutonomousDatabaseList, error) { +func fetchAutonomousDatabases(kubeClient client.Client, namespace string) (*dbv4.AutonomousDatabaseList, error) { // Get the list of AutonomousDatabaseBackupOCID in the same namespace - adbList := &dbv1alpha1.AutonomousDatabaseList{} + adbList := &dbv4.AutonomousDatabaseList{} if err := kubeClient.List(context.TODO(), adbList, &client.ListOptions{Namespace: namespace}); err != nil { // Ignore not-found errors, since they can't be fixed by an immediate requeue. 
@@ -96,11 +97,20 @@ func fetchAutonomousDatabases(kubeClient client.Client, namespace string) (*dbv1 return adbList, nil } -func FetchAutonomousDatabaseBackups(kubeClient client.Client, namespace string) (*dbv1alpha1.AutonomousDatabaseBackupList, error) { +func FetchAutonomousDatabaseBackups(kubeClient client.Client, namespace string, adbName string) (*dbv4.AutonomousDatabaseBackupList, error) { // Get the list of AutonomousDatabaseBackupOCID in the same namespace - backupList := &dbv1alpha1.AutonomousDatabaseBackupList{} - - if err := kubeClient.List(context.TODO(), backupList, &client.ListOptions{Namespace: namespace}); err != nil { + backupList := &dbv4.AutonomousDatabaseBackupList{} + + // Create a label selector + selector := labels.Set{"adb": adbName}.AsSelector() + + if err := kubeClient.List( + context.TODO(), + backupList, + &client.ListOptions{ + Namespace: namespace, + LabelSelector: selector, + }); err != nil { // Ignore not-found errors, since they can't be fixed by an immediate requeue. // No need to change the since we don't know if we obtain the object. if !apiErrors.IsNotFound(err) { diff --git a/commons/multitenant/lrest/common.go b/commons/multitenant/lrest/common.go new file mode 100644 index 00000000..e72e85b0 --- /dev/null +++ b/commons/multitenant/lrest/common.go @@ -0,0 +1,113 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if + one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+*/ + +package lrest + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "regexp" + "strings" + + corev1 "k8s.io/api/core/v1" + + ctrl "sigs.k8s.io/controller-runtime" +) + +func CommonDecryptWithPrivKey(Key string, Buffer string, req ctrl.Request) (string, error) { + + Debug := 0 + block, _ := pem.Decode([]byte(Key)) + pkcs8PrivateKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + fmt.Printf("Failed to parse private key %s \n", err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("======================================\n") + fmt.Printf("%s\n", Key) + fmt.Printf("======================================\n") + } + + encString64, err := base64.StdEncoding.DecodeString(string(Buffer)) + if err != nil { + fmt.Printf("Failed to decode encrypted string to base64: %s\n", err.Error()) + return "", err + } + + decryptedB, err := rsa.DecryptPKCS1v15(nil, pkcs8PrivateKey.(*rsa.PrivateKey), encString64) + if err != nil { + fmt.Printf("Failed to decrypt string %s\n", err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("[%s]\n", string(decryptedB)) + } + return strings.TrimSpace(string(decryptedB)), err + +} + +func ParseConfigMapData(cfgmap *corev1.ConfigMap) []string { + + var tokens []string + for Key, Value := range cfgmap.Data { + fmt.Printf("KEY:%s\n", Key) + re0 := regexp.MustCompile("\\n") + re1 := regexp.MustCompile(";") + re2 := regexp.MustCompile(",") /* Additional separator for future use */ + + Value = re0.ReplaceAllString(Value, " ") + tokens = strings.Split(Value, " ") + + for cnt := range tokens { + if len(tokens[cnt]) != 0 { + tokens[cnt] = re1.ReplaceAllString(tokens[cnt], " ") + tokens[cnt] = re2.ReplaceAllString(tokens[cnt], " ") + + } + + } + + } + + return tokens + +} diff --git a/commons/observability/constants.go b/commons/observability/constants.go index 89ecb946..45f06e49 100644 --- a/commons/observability/constants.go +++ b/commons/observability/constants.go @@ -1,6 +1,8 
@@ package observability -import "github.com/oracle/oracle-database-operator/apis/observability/v1alpha1" +import ( + v4 "github.com/oracle/oracle-database-operator/apis/observability/v4" +) const ( UnknownValue = "UNKNOWN" @@ -9,9 +11,9 @@ const ( // Observability Status const ( - StatusObservabilityPending v1alpha1.StatusEnum = "PENDING" - StatusObservabilityError v1alpha1.StatusEnum = "ERROR" - StatusObservabilityReady v1alpha1.StatusEnum = "READY" + StatusObservabilityPending v4.StatusEnum = "PENDING" + StatusObservabilityError v4.StatusEnum = "ERROR" + StatusObservabilityReady v4.StatusEnum = "READY" ) // Log Names @@ -27,8 +29,9 @@ const ( DefaultDbUserKey = "username" DefaultDBPasswordKey = "password" DefaultDBConnectionStringKey = "connection" - DefaultLabelKey = "app" DefaultConfigVolumeString = "config-volume" + DefaultLogFilename = "alert.log" + DefaultLogVolumeString = "log-volume" DefaultWalletVolumeString = "creds" DefaultOCIPrivateKeyVolumeString = "ocikey" DefaultOCIConfigFingerprintKey = "fingerprint" @@ -36,10 +39,12 @@ const ( DefaultOCIConfigTenancyKey = "tenancy" DefaultOCIConfigUserKey = "user" - DefaultExporterImage = "container-registry.oracle.com/database/observability-exporter:1.1.0" + DefaultExporterImage = "container-registry.oracle.com/database/observability-exporter:1.5.2" DefaultServicePort = 9161 DefaultServiceTargetPort = 9161 + DefaultAppPort = 8080 DefaultPrometheusPort = "metrics" + DefaultServiceType = "ClusterIP" DefaultReplicaCount = 1 DefaultExporterConfigMountRootPath = "/oracle/observability" DefaultOracleHome = "/lib/oracle/21/client64/lib" @@ -52,12 +57,15 @@ const ( DefaultExporterConfigmapAbsolutePath = DefaultExporterConfigMountRootPath + "/" + DefaultExporterConfigmapFilename ) -// default resource prefixes +// labeling +const ( + DefaultSelectorLabelKey = "app" + DefaultReleaseLabelKey = "release" +) + +// default resource const ( - DefaultServiceMonitorPrefix = "obs-servicemonitor-" - DefaultLabelPrefix = "obs-" - 
DefaultExporterDeploymentPrefix = "obs-deploy-" - DefaultExporterContainerName = "observability-exporter" + DefaultExporterContainerName = "observability-exporter" ) // Known environment variables @@ -66,6 +74,7 @@ const ( EnvVarDataSourceUser = "DB_USERNAME" EnvVarDataSourcePassword = "DB_PASSWORD" EnvVarDataSourceConnectString = "DB_CONNECT_STRING" + EnvVarDataSourceLogDestination = "LOG_DESTINATION" EnvVarDataSourcePwdVaultSecretName = "VAULT_SECRET_NAME" EnvVarDataSourcePwdVaultId = "VAULT_ID" EnvVarCustomConfigmap = "CUSTOM_METRICS" @@ -93,11 +102,11 @@ const ( ReasonReadyFailed = "ReadinessValidationFailed" ReasonDeploymentSpecValidationFailed = "SpecValidationFailed" - ReasonDeploymentSuccessful = "ResourceDeployed" - ReasonDeploymentUpdated = "ResourceDeploymentUpdated" - ReasonDeploymentUpdateFailed = "ResourceDeploymentUpdateFailed" - ReasonDeploymentFailed = "ResourceDeploymentFailed" - ReasonDeploymentPending = "ResourceDeploymentInProgress" + ReasonDeploymentSuccessful = "ResourceDeployed" + ReasonResourceUpdated = "ResourceUpdated" + ReasonResourceUpdateFailed = "ResourceUpdateFailed" + ReasonDeploymentFailed = "ResourceDeploymentFailed" + ReasonDeploymentPending = "ResourceDeploymentInProgress" ReasonGeneralResourceGenerationFailed = "ResourceGenerationFailed" ReasonGeneralResourceCreated = "ResourceCreated" @@ -112,18 +121,19 @@ const ( ErrorStatusUpdate = "an error occurred with updating the cr status" ErrorSpecValidationFailedDueToAnError = "an error occurred with validating the exporter deployment spec" ErrorDeploymentPodsFailure = "an error occurred with deploying exporter deployment pods" - ErrorDeploymentUpdate = "an error occurred with updating exporter deployment" ErrorResourceCreationFailure = "an error occurred with creating databaseobserver resource" ErrorResourceRetrievalFailureDueToAnError = "an error occurred with retrieving databaseobserver resource" + LogErrorWithResourceUpdate = "an error occurred with updating resource" ) // Log 
Infos const ( - LogCRStart = "Started DatabaseObserver instance reconciliation" - LogCREnd = "Ended DatabaseObserver instance reconciliation, resource must have been deleted." - LogResourceCreated = "Created DatabaseObserver resource successfully" - LogResourceUpdated = "Updated DatabaseObserver resource successfully" - LogResourceFound = "Validated DatabaseObserver resource readiness" + LogCRStart = "Started DatabaseObserver instance reconciliation" + LogCREnd = "Ended DatabaseObserver instance reconciliation, resource must have been deleted." + LogResourceCreated = "Created DatabaseObserver resource successfully" + LogResourceUpdated = "Updated DatabaseObserver resource successfully" + LogResourceFound = "Validated DatabaseObserver resource readiness" + LogSuccessWithResourceUpdate = "Updated DatabaseObserver resource successfully" ) // Messages @@ -140,11 +150,8 @@ const ( MessageResourceGenerationFailed = "Failed to generate resource due to an error" MessageExporterDeploymentSpecValidationFailed = "Failed to validate export deployment spec due to an error with the spec" - MessageExporterDeploymentImageUpdated = "Completed updating exporter deployment image successfully" - MessageExporterDeploymentEnvironmentUpdated = "Completed updating exporter deployment environment values successfully" - MessageExporterDeploymentReplicaUpdated = "Completed updating exporter deployment replicaCount successfully" - MessageExporterDeploymentVolumesUpdated = "Completed updating exporter deployment volumes successfully" - MessageExporterDeploymentUpdateFailed = "Failed to update exporter deployment due to an error" + MessageExporterResourceUpdateFailed = "Failed to update exporter resource due to an error" + MessageExporterResourceUpdated = "Updated exporter resource successfully" MessageExporterDeploymentValidationFailed = "Failed to validate exporter deployment due to an error retrieving resource" MessageExporterDeploymentSuccessful = "Completed validation of exporter 
deployment readiness" MessageExporterDeploymentFailed = "Failed to deploy exporter deployment due to PodFailure" @@ -158,16 +165,11 @@ const ( EventMessageFailedCRRetrieval = "Encountered error retrieving databaseObserver instance" EventReasonSpecError = "DeploymentSpecValidationFailed" - EventMessageSpecErrorDBPasswordMissing = "Spec validation failed due to missing dbPassword field values" EventMessageSpecErrorDBPasswordSecretMissing = "Spec validation failed due to required dbPassword secret not found" EventMessageSpecErrorDBConnectionStringSecretMissing = "Spec validation failed due to required dbConnectionString secret not found" EventMessageSpecErrorDBPUserSecretMissing = "Spec validation failed due to dbUser secret not found" EventMessageSpecErrorConfigmapMissing = "Spec validation failed due to custom config configmap not found" EventMessageSpecErrorDBWalletSecretMissing = "Spec validation failed due to provided dbWallet secret not found" - EventReasonUpdateSucceeded = "ExporterDeploymentUpdated" - EventMessageUpdatedImageSucceeded = "Exporter deployment image updated successfully" - EventMessageUpdatedEnvironmentSucceeded = "Exporter deployment environment values updated successfully" - EventMessageUpdatedVolumesSucceeded = "Exporter deployment volumes updated successfully" - EventMessageUpdatedReplicaSucceeded = "Exporter deployment replicaCount updated successfully" + EventReasonUpdateSucceeded = "ExporterDeploymentUpdated" ) diff --git a/commons/observability/utils.go b/commons/observability/utils.go index f396b95a..6eccb261 100644 --- a/commons/observability/utils.go +++ b/commons/observability/utils.go @@ -1,87 +1,230 @@ package observability import ( - apiv1 "github.com/oracle/oracle-database-operator/apis/observability/v1alpha1" - appsv1 "k8s.io/api/apps/v1" + api "github.com/oracle/oracle-database-operator/apis/observability/v4" + monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" + 
"k8s.io/apimachinery/pkg/util/intstr" + "path/filepath" + "strings" ) -// GetExporterLabels function retrieves exporter labels from api or provides default -func GetExporterLabels(api *apiv1.DatabaseObserver) map[string]string { +func AddSidecarContainers(a *api.DatabaseObserver, listing *[]corev1.Container) { + + if containers := a.Spec.ExporterSidecars; len(containers) > 0 { + for _, container := range containers { + *listing = append(*listing, container) + } + + } +} + +func AddSidecarVolumes(a *api.DatabaseObserver, listing *[]corev1.Volume) { + + if volumes := a.Spec.SideCarVolumes; len(volumes) > 0 { + for _, v := range volumes { + *listing = append(*listing, v) + } + + } +} + +// GetLabels retrieves labels from the spec +func GetLabels(a *api.DatabaseObserver, customResourceLabels map[string]string) map[string]string { + var l = make(map[string]string) - if labels := api.Spec.Prometheus.Labels; labels != nil && len(labels) > 0 { - for k, v := range labels { - l[k] = v + // get inherited labels + if iLabels := a.Spec.InheritLabels; iLabels != nil { + for _, v := range iLabels { + if v != DefaultSelectorLabelKey { + l[v] = a.Labels[v] + } + } + } + + if customResourceLabels != nil { + for k, v := range customResourceLabels { + if k != DefaultSelectorLabelKey { + l[k] = v + } } - l["release"] = "stable" - return l } - return map[string]string{ - DefaultLabelKey: DefaultLabelPrefix + api.Name, - "release": "stable", + + // add app label + l[DefaultSelectorLabelKey] = a.Name + return l +} + +// GetSelectorLabel adds selector label +func GetSelectorLabel(a *api.DatabaseObserver) map[string]string { + selectors := make(map[string]string) + selectors[DefaultSelectorLabelKey] = a.Name + return selectors +} + +// GetExporterVersion retrieves version of exporter used +func GetExporterVersion(a *api.DatabaseObserver) string { + appVersion := "latest" + whichImage := DefaultExporterImage + if img := a.Spec.Exporter.Deployment.ExporterImage; img != "" { + whichImage = img 
} + // return tag in image:tag + if str := strings.Split(whichImage, ":"); len(str) == 2 { + appVersion = str[1] + } + return appVersion +} + +// GetExporterArgs retrieves args +func GetExporterArgs(a *api.DatabaseObserver) []string { + if args := a.Spec.Exporter.Deployment.ExporterArgs; args != nil || len(args) > 0 { + return args + } + return nil } -// GetExporterServicePort function retrieves exporter service port from api or provides default -func GetExporterServicePort(api *apiv1.DatabaseObserver) int32 { - if rPort := api.Spec.Exporter.Service.Port; rPort != 0 { - return rPort +// GetExporterDeploymentSecurityContext retrieves security context for container +func GetExporterDeploymentSecurityContext(a *api.DatabaseObserver) *corev1.SecurityContext { + if sc := a.Spec.Exporter.Deployment.SecurityContext; sc != nil { + return sc } - return int32(DefaultServicePort) + return &corev1.SecurityContext{} } -// GetExporterServiceMonitorPort function retrieves exporter service monitor port from api or provides default -func GetExporterServiceMonitorPort(api *apiv1.DatabaseObserver) string { - if rPort := api.Spec.Prometheus.Port; rPort != "" { - return rPort +// GetExporterPodSecurityContext retrieves security context for pods +func GetExporterPodSecurityContext(a *api.DatabaseObserver) *corev1.PodSecurityContext { + if sc := a.Spec.Exporter.Deployment.DeploymentPodTemplate.SecurityContext; sc != nil { + return sc } - return DefaultPrometheusPort + return &corev1.PodSecurityContext{} +} +// GetExporterCommands retrieves commands +func GetExporterCommands(a *api.DatabaseObserver) []string { + if c := a.Spec.Exporter.Deployment.ExporterCommands; c != nil || len(c) > 0 { + return c + } + return nil } -// GetExporterDeploymentVolumeMounts function retrieves volume mounts from api or provides default -func GetExporterDeploymentVolumeMounts(api *apiv1.DatabaseObserver) []corev1.VolumeMount { +// GetExporterServicePort function retrieves exporter service port from a or 
provides default +func GetExporterServicePort(a *api.DatabaseObserver) []corev1.ServicePort { + + servicePorts := make([]corev1.ServicePort, 0) + + // get service ports + if ports := a.Spec.Exporter.Service.Ports; len(ports) > 0 { + for _, port := range ports { + servicePorts = append(servicePorts, port) + } + + } else { + // if not, provide default service port + servicePorts = append(servicePorts, corev1.ServicePort{ + Name: DefaultPrometheusPort, + Port: DefaultServicePort, + TargetPort: intstr.FromInt32(DefaultServiceTargetPort), + }) + } + + return servicePorts + +} + +// GetEndpoints function +func GetEndpoints(a *api.DatabaseObserver) []monitorv1.Endpoint { + + endpoints := make([]monitorv1.Endpoint, 0) + + // get endpoints + if es := a.Spec.Prometheus.ServiceMonitor.Endpoints; len(es) > 0 { + for _, e := range es { + endpoints = append(endpoints, e) + } + } + + // if not, provide default endpoint + endpoints = append(endpoints, monitorv1.Endpoint{ + Port: DefaultPrometheusPort, + Interval: "20s", + }) + + return endpoints +} + +func AddNamespaceSelector(a *api.DatabaseObserver, spec *monitorv1.ServiceMonitorSpec) { + + if ns := a.Spec.Prometheus.ServiceMonitor.NamespaceSelector; ns != nil { + a.Spec.Prometheus.ServiceMonitor.NamespaceSelector.DeepCopyInto(&spec.NamespaceSelector) + } + +} + +// GetExporterDeploymentVolumeMounts function retrieves volume mounts from a or provides default +func GetExporterDeploymentVolumeMounts(a *api.DatabaseObserver) []corev1.VolumeMount { volM := make([]corev1.VolumeMount, 0) - if cVolumeSourceName := api.Spec.Exporter.ExporterConfig.Configmap.Name; cVolumeSourceName != "" { + if cVolumeSourceName := a.Spec.ExporterConfig.Configmap.Name; cVolumeSourceName != "" { volM = append(volM, corev1.VolumeMount{ Name: DefaultConfigVolumeString, MountPath: DefaultExporterConfigMountRootPath, }) } - // api.Spec.Database.DBWallet.SecretName optional + // a.Spec.Database.DBWallet.SecretName optional // if null, consider the database 
NON-ADB and connect as such - if secretName := api.Spec.Database.DBWallet.SecretName; secretName != "" { + if secretName := a.Spec.Database.DBWallet.SecretName; secretName != "" { + + p := DefaultOracleTNSAdmin + + // Determine what the value of TNS_ADMIN + // if custom TNS_ADMIN environment variable is set and found, use that instead as the path + if rCustomEnvs := a.Spec.Exporter.Deployment.ExporterEnvs; rCustomEnvs != nil { + if v, f := rCustomEnvs[EnvVarTNSAdmin]; f { + p = v + } + } + volM = append(volM, corev1.VolumeMount{ Name: DefaultWalletVolumeString, - MountPath: DefaultOracleTNSAdmin, + MountPath: p, }) } - // api.Spec.OCIConfig.SecretName required if vault is used - if secretName := api.Spec.OCIConfig.SecretName; secretName != "" { + // a.Spec.OCIConfig.SecretName required if vault is used + if secretName := a.Spec.OCIConfig.SecretName; secretName != "" { volM = append(volM, corev1.VolumeMount{ Name: DefaultOCIPrivateKeyVolumeString, MountPath: DefaultVaultPrivateKeyRootPath, }) } + + // a.Spec.Log.Path path to mount for a custom log path, a volume is required + if rLogPath := a.Spec.Log.Path; rLogPath != "" { + vName := GetLogName(a) + volM = append(volM, corev1.VolumeMount{ + Name: vName, + MountPath: rLogPath, + }) + } + return volM } -// GetExporterDeploymentVolumes function retrieves volumes from api or provides default -func GetExporterDeploymentVolumes(api *apiv1.DatabaseObserver) []corev1.Volume { +// GetExporterDeploymentVolumes function retrieves volumes from a or provides default +func GetExporterDeploymentVolumes(a *api.DatabaseObserver) []corev1.Volume { vol := make([]corev1.Volume, 0) // config-volume Volume // if null, the exporter uses the default built-in config - if cVolumeSourceName := api.Spec.Exporter.ExporterConfig.Configmap.Name; cVolumeSourceName != "" { + if cVolumeSourceName := a.Spec.ExporterConfig.Configmap.Name; cVolumeSourceName != "" { - cVolumeSourceKey := api.Spec.Exporter.ExporterConfig.Configmap.Key + cVolumeSourceKey 
:= a.Spec.ExporterConfig.Configmap.Key cMSource := &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ Name: cVolumeSourceName, @@ -96,9 +239,9 @@ func GetExporterDeploymentVolumes(api *apiv1.DatabaseObserver) []corev1.Volume { } // creds Volume - // api.Spec.Database.DBWallet.SecretName optional + // a.Spec.Database.DBWallet.SecretName optional // if null, consider the database NON-ADB and connect as such - if secretName := api.Spec.Database.DBWallet.SecretName; secretName != "" { + if secretName := a.Spec.Database.DBWallet.SecretName; secretName != "" { vol = append(vol, corev1.Volume{ Name: DefaultWalletVolumeString, @@ -111,8 +254,8 @@ func GetExporterDeploymentVolumes(api *apiv1.DatabaseObserver) []corev1.Volume { } // ocikey Volume - // api.Spec.Database.DBWallet.SecretName optional - if secretName := api.Spec.OCIConfig.SecretName; secretName != "" { + // a.Spec.Database.DBWallet.SecretName optional + if secretName := a.Spec.OCIConfig.SecretName; secretName != "" { OCIConfigSource := &corev1.SecretVolumeSource{ SecretName: secretName, @@ -127,39 +270,104 @@ func GetExporterDeploymentVolumes(api *apiv1.DatabaseObserver) []corev1.Volume { VolumeSource: corev1.VolumeSource{Secret: OCIConfigSource}, }) } + + // log-volume Volume + if rLogPath := a.Spec.Log.Path; rLogPath != "" { + vs := GetLogVolumeSource(a) + vName := GetLogName(a) + + vol = append(vol, corev1.Volume{ + Name: vName, + VolumeSource: vs, + }) + } + return vol } -// GetExporterSelector function retrieves labels from api or provides default -func GetExporterSelector(api *apiv1.DatabaseObserver) map[string]string { - var s = make(map[string]string) - if labels := api.Spec.Prometheus.Labels; labels != nil && len(labels) > 0 { - for k, v := range labels { - s[k] = v +// GetExporterConfig function retrieves config name for status +func GetExporterConfig(a *api.DatabaseObserver) string { + + configName := DefaultValue + if cmName := a.Spec.ExporterConfig.Configmap.Name; 
cmName != "" { + configName = cmName + } + + return configName +} + +func GetLogName(a *api.DatabaseObserver) string { + if name := a.Spec.Log.Volume.Name; name != "" { + return name + } + return DefaultLogVolumeString +} + +// GetLogVolumeSource function retrieves the source to help GetExporterDeploymentVolumes +func GetLogVolumeSource(a *api.DatabaseObserver) corev1.VolumeSource { + + vs := corev1.VolumeSource{} + rLogVolumeClaimName := a.Spec.Log.Volume.PersistentVolumeClaim.ClaimName + + // volume claims take precedence + if rLogVolumeClaimName != "" { + vs.PersistentVolumeClaim = &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: rLogVolumeClaimName, } - return s + return vs + + } else { + vs.EmptyDir = &corev1.EmptyDirVolumeSource{} + return vs + } +} +// AddEnv is a helper method that appends an Env Var value +func AddEnv(env []corev1.EnvVar, existing map[string]string, name string, v string) []corev1.EnvVar { + + // Evaluate if env already exists + if _, f := existing[name]; !f { + env = append(env, corev1.EnvVar{Name: name, Value: v}) } - return map[string]string{DefaultLabelKey: DefaultLabelPrefix + api.Name} + return env +} + +// AddEnvFrom is a helper method that appends an Env Var value source +func AddEnvFrom(env []corev1.EnvVar, existing map[string]string, name string, v *corev1.EnvVarSource) []corev1.EnvVar { + // Evaluate if env already exists + if _, f := existing[name]; !f { + env = append(env, corev1.EnvVar{Name: name, ValueFrom: v}) + } + return env } -// GetExporterEnvs function retrieves env from api or provides default -func GetExporterEnvs(api *apiv1.DatabaseObserver) []corev1.EnvVar { +// GetExporterEnvs function retrieves env from a or provides default +func GetExporterEnvs(a *api.DatabaseObserver) []corev1.EnvVar { optional := true - rDBPasswordKey := api.Spec.Database.DBPassword.Key - rDBPasswordName := api.Spec.Database.DBPassword.SecretName - rDBConnectStrKey := api.Spec.Database.DBConnectionString.Key - rDBConnectStrName := 
api.Spec.Database.DBConnectionString.SecretName - rDBVaultSecretName := api.Spec.Database.DBPassword.VaultSecretName - rDBVaultOCID := api.Spec.Database.DBPassword.VaultOCID - rDBUserSKey := api.Spec.Database.DBUser.Key - rDBUserSName := api.Spec.Database.DBUser.SecretName - rOCIConfigCMName := api.Spec.OCIConfig.ConfigMapName + rDBPasswordKey := a.Spec.Database.DBPassword.Key + rDBPasswordName := a.Spec.Database.DBPassword.SecretName + rDBConnectStrKey := a.Spec.Database.DBConnectionString.Key + rDBConnectStrName := a.Spec.Database.DBConnectionString.SecretName + rDBVaultSecretName := a.Spec.Database.DBPassword.VaultSecretName + rDBVaultOCID := a.Spec.Database.DBPassword.VaultOCID + rDBUserSKey := a.Spec.Database.DBUser.Key + rDBUserSName := a.Spec.Database.DBUser.SecretName + rOCIConfigCMName := a.Spec.OCIConfig.ConfigMapName + rLogPath := a.Spec.Log.Path + rLogFilename := a.Spec.Log.Filename + rCustomEnvs := a.Spec.Exporter.Deployment.ExporterEnvs var env = make([]corev1.EnvVar, 0) + // add CustomEnvs + if rCustomEnvs != nil { + for k, v := range rCustomEnvs { + env = append(env, corev1.EnvVar{Name: k, Value: v}) + } + } + // DB_USERNAME environment variable if rDBUserSKey == "" { // overwrite rDBUserSKey = DefaultDbUserKey @@ -170,7 +378,7 @@ func GetExporterEnvs(api *apiv1.DatabaseObserver) []corev1.EnvVar { LocalObjectReference: corev1.LocalObjectReference{Name: rDBUserSName}, Optional: &optional, }} - env = append(env, corev1.EnvVar{Name: EnvVarDataSourceUser, ValueFrom: envUser}) + env = AddEnvFrom(env, rCustomEnvs, EnvVarDataSourceUser, envUser) // DB_CONNECT_STRING environment variable if rDBConnectStrKey == "" { @@ -182,15 +390,15 @@ func GetExporterEnvs(api *apiv1.DatabaseObserver) []corev1.EnvVar { LocalObjectReference: corev1.LocalObjectReference{Name: rDBConnectStrName}, Optional: &optional, }} - env = append(env, corev1.EnvVar{Name: EnvVarDataSourceConnectString, ValueFrom: envConnectStr}) + env = AddEnvFrom(env, rCustomEnvs, 
EnvVarDataSourceConnectString, envConnectStr) // DB_PASSWORD environment variable // if useVault, add environment variables for Vault ID and Vault Secret Name useVault := rDBVaultSecretName != "" && rDBVaultOCID != "" if useVault { - env = append(env, corev1.EnvVar{Name: EnvVarDataSourcePwdVaultSecretName, Value: rDBVaultSecretName}) - env = append(env, corev1.EnvVar{Name: EnvVarDataSourcePwdVaultId, Value: rDBVaultOCID}) + env = AddEnv(env, rCustomEnvs, EnvVarDataSourcePwdVaultSecretName, rDBVaultSecretName) + env = AddEnv(env, rCustomEnvs, EnvVarDataSourcePwdVaultId, rDBVaultOCID) // Configuring the configProvider prefixed with vault_ // https://github.com/oracle/oracle-db-appdev-monitoring/blob/main/vault/vault.go @@ -222,12 +430,11 @@ func GetExporterEnvs(api *apiv1.DatabaseObserver) []corev1.EnvVar { Optional: &optional, }, } - - env = append(env, corev1.EnvVar{Name: EnvVarVaultFingerprint, ValueFrom: configSourceFingerprintValue}) - env = append(env, corev1.EnvVar{Name: EnvVarVaultUserOCID, ValueFrom: configSourceUserValue}) - env = append(env, corev1.EnvVar{Name: EnvVarVaultTenancyOCID, ValueFrom: configSourceTenancyValue}) - env = append(env, corev1.EnvVar{Name: EnvVarVaultRegion, ValueFrom: configSourceRegionValue}) - env = append(env, corev1.EnvVar{Name: EnvVarVaultPrivateKeyPath, Value: DefaultVaultPrivateKeyAbsolutePath}) + env = AddEnvFrom(env, rCustomEnvs, EnvVarVaultFingerprint, configSourceFingerprintValue) + env = AddEnvFrom(env, rCustomEnvs, EnvVarVaultUserOCID, configSourceUserValue) + env = AddEnvFrom(env, rCustomEnvs, EnvVarVaultTenancyOCID, configSourceTenancyValue) + env = AddEnvFrom(env, rCustomEnvs, EnvVarVaultRegion, configSourceRegionValue) + env = AddEnv(env, rCustomEnvs, EnvVarVaultPrivateKeyPath, DefaultVaultPrivateKeyAbsolutePath) } else { @@ -241,162 +448,46 @@ func GetExporterEnvs(api *apiv1.DatabaseObserver) []corev1.EnvVar { Optional: &optional, }} - env = append(env, corev1.EnvVar{Name: EnvVarDataSourcePassword, ValueFrom: 
dbPassword}) + env = AddEnvFrom(env, rCustomEnvs, EnvVarDataSourcePassword, dbPassword) } // CUSTOM_METRICS environment variable - if customMetricsName := api.Spec.Exporter.ExporterConfig.Configmap.Name; customMetricsName != "" { + if customMetricsName := a.Spec.ExporterConfig.Configmap.Name; customMetricsName != "" { customMetrics := DefaultExporterConfigmapAbsolutePath - env = append(env, corev1.EnvVar{Name: EnvVarCustomConfigmap, Value: customMetrics}) + + env = AddEnv(env, rCustomEnvs, EnvVarCustomConfigmap, customMetrics) + } + + env = AddEnv(env, rCustomEnvs, EnvVarOracleHome, DefaultOracleHome) + env = AddEnv(env, rCustomEnvs, EnvVarTNSAdmin, DefaultOracleTNSAdmin) + + // LOG_DESTINATION environment variable + if rLogPath != "" { + if rLogFilename == "" { + rLogFilename = DefaultLogFilename + } + d := filepath.Join(rLogPath, rLogFilename) + env = AddEnv(env, rCustomEnvs, EnvVarDataSourceLogDestination, d) } - env = append(env, corev1.EnvVar{Name: EnvVarOracleHome, Value: DefaultOracleHome}) - env = append(env, corev1.EnvVar{Name: EnvVarTNSAdmin, Value: DefaultOracleTNSAdmin}) return env } -// GetExporterReplicas function retrieves replicaCount from api or provides default -func GetExporterReplicas(api *apiv1.DatabaseObserver) int32 { - if rc := api.Spec.Replicas; rc != 0 { +// GetExporterReplicas function retrieves replicaCount from a or provides default +func GetExporterReplicas(a *api.DatabaseObserver) int32 { + if rc := a.Spec.Replicas; rc != 0 { return rc } return int32(DefaultReplicaCount) } -// GetExporterImage function retrieves image from api or provides default -func GetExporterImage(api *apiv1.DatabaseObserver) string { - if img := api.Spec.Exporter.ExporterImage; img != "" { +// GetExporterImage function retrieves image from a or provides default +func GetExporterImage(a *api.DatabaseObserver) string { + if img := a.Spec.Exporter.Deployment.ExporterImage; img != "" { return img } - return DefaultExporterImage - -} - -func 
IsUpdateRequiredForContainerImage(desired *appsv1.Deployment, found *appsv1.Deployment) bool { - foundImage := found.Spec.Template.Spec.Containers[0].Image - desiredImage := desired.Spec.Template.Spec.Containers[0].Image - - return foundImage != desiredImage -} - -func IsUpdateRequiredForEnvironmentVars(desired *appsv1.Deployment, found *appsv1.Deployment) bool { - var updateEnvsRequired bool - desiredEnvValues := make(map[string]string) - - foundEnvs := found.Spec.Template.Spec.Containers[0].Env - desiredEnvs := desired.Spec.Template.Spec.Containers[0].Env - if len(foundEnvs) != len(desiredEnvs) { - updateEnvsRequired = true - } else { - for _, v := range desiredEnvs { - - if v.Name == EnvVarDataSourceUser || - v.Name == EnvVarDataSourceConnectString || - v.Name == EnvVarDataSourcePassword { - - ref := *(*v.ValueFrom).SecretKeyRef - desiredEnvValues[v.Name] = ref.Key + "-" + ref.Name - - } else if v.Name == EnvVarVaultFingerprint || - v.Name == EnvVarVaultRegion || - v.Name == EnvVarVaultTenancyOCID || - v.Name == EnvVarVaultUserOCID { - - ref := *(*v.ValueFrom).ConfigMapKeyRef - desiredEnvValues[v.Name] = ref.Key + "-" + ref.Name - - } else if v.Name == EnvVarDataSourcePwdVaultId || - v.Name == EnvVarDataSourcePwdVaultSecretName || - v.Name == EnvVarCustomConfigmap { - - desiredEnvValues[v.Name] = v.Value - } - } - - for _, v := range foundEnvs { - var foundValue string - - if v.Name == EnvVarDataSourceUser || - v.Name == EnvVarDataSourceConnectString || - v.Name == EnvVarDataSourcePassword { - - ref := *(*v.ValueFrom).SecretKeyRef - foundValue = ref.Key + "-" + ref.Name - - } else if v.Name == EnvVarVaultFingerprint || - v.Name == EnvVarVaultRegion || - v.Name == EnvVarVaultTenancyOCID || - v.Name == EnvVarVaultUserOCID { - - ref := *(*v.ValueFrom).ConfigMapKeyRef - foundValue = ref.Key + "-" + ref.Name - - } else if v.Name == EnvVarDataSourcePwdVaultId || - v.Name == EnvVarDataSourcePwdVaultSecretName || - v.Name == EnvVarCustomConfigmap { - - foundValue = 
v.Value - } - - if desiredEnvValues[v.Name] != foundValue { - updateEnvsRequired = true - } - } - } - return updateEnvsRequired -} - -func IsUpdateRequiredForVolumes(desired *appsv1.Deployment, found *appsv1.Deployment) bool { - var updateVolumesRequired bool - var foundConfigmap, desiredConfigmap string - var foundWalletSecret, desiredWalletSecret string - var foundOCIConfig, desiredOCIConfig string - desiredVolumes := desired.Spec.Template.Spec.Volumes - foundVolumes := found.Spec.Template.Spec.Volumes - - if len(desiredVolumes) != len(foundVolumes) { - updateVolumesRequired = true - } else { - for _, v := range desiredVolumes { - if v.Name == DefaultConfigVolumeString { - desiredConfigmap = v.ConfigMap.Name - for _, key := range v.ConfigMap.Items { - desiredConfigmap += key.Key - } - } else if v.Name == DefaultWalletVolumeString { - desiredWalletSecret = v.VolumeSource.Secret.SecretName - - } else if v.Name == DefaultOCIPrivateKeyVolumeString { - desiredOCIConfig = v.VolumeSource.Secret.SecretName - } - } - - for _, v := range foundVolumes { - if v.Name == DefaultConfigVolumeString { - foundConfigmap = v.ConfigMap.Name - for _, key := range v.ConfigMap.Items { - foundConfigmap += key.Key - } - } else if v.Name == DefaultWalletVolumeString { - foundWalletSecret = v.VolumeSource.Secret.SecretName - - } else if v.Name == DefaultOCIPrivateKeyVolumeString { - foundOCIConfig = v.VolumeSource.Secret.SecretName - } - } - } - - return updateVolumesRequired || - desiredConfigmap != foundConfigmap || - desiredWalletSecret != foundWalletSecret || - desiredOCIConfig != foundOCIConfig -} - -func IsUpdateRequiredForReplicas(desired *appsv1.Deployment, found *appsv1.Deployment) bool { - foundReplicas := *found.Spec.Replicas - desiredReplicas := *desired.Spec.Replicas + return DefaultExporterImage - return desiredReplicas != foundReplicas } diff --git a/commons/oci/containerdatabase.go b/commons/oci/containerdatabase.go index a5313d41..9391d6f8 100644 --- 
a/commons/oci/containerdatabase.go +++ b/commons/oci/containerdatabase.go @@ -44,13 +44,13 @@ import ( "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/database" - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" ) /******************************** * Autonomous Container Database *******************************/ -func (d *databaseService) CreateAutonomousContainerDatabase(acd *dbv1alpha1.AutonomousContainerDatabase) (database.CreateAutonomousContainerDatabaseResponse, error) { +func (d *DatabaseService) CreateAutonomousContainerDatabase(acd *dbv4.AutonomousContainerDatabase) (database.CreateAutonomousContainerDatabaseResponse, error) { createAutonomousContainerDatabaseRequest := database.CreateAutonomousContainerDatabaseRequest{ CreateAutonomousContainerDatabaseDetails: database.CreateAutonomousContainerDatabaseDetails{ CompartmentId: acd.Spec.CompartmentOCID, @@ -63,7 +63,7 @@ func (d *databaseService) CreateAutonomousContainerDatabase(acd *dbv1alpha1.Auto return d.dbClient.CreateAutonomousContainerDatabase(context.TODO(), createAutonomousContainerDatabaseRequest) } -func (d *databaseService) GetAutonomousContainerDatabase(acdOCID string) (database.GetAutonomousContainerDatabaseResponse, error) { +func (d *DatabaseService) GetAutonomousContainerDatabase(acdOCID string) (database.GetAutonomousContainerDatabaseResponse, error) { getAutonomousContainerDatabaseRequest := database.GetAutonomousContainerDatabaseRequest{ AutonomousContainerDatabaseId: common.String(acdOCID), } @@ -71,7 +71,7 @@ func (d *databaseService) GetAutonomousContainerDatabase(acdOCID string) (databa return d.dbClient.GetAutonomousContainerDatabase(context.TODO(), getAutonomousContainerDatabaseRequest) } -func (d *databaseService) UpdateAutonomousContainerDatabase(acdOCID string, difACD *dbv1alpha1.AutonomousContainerDatabase) 
(database.UpdateAutonomousContainerDatabaseResponse, error) { +func (d *DatabaseService) UpdateAutonomousContainerDatabase(acdOCID string, difACD *dbv4.AutonomousContainerDatabase) (database.UpdateAutonomousContainerDatabaseResponse, error) { updateAutonomousContainerDatabaseRequest := database.UpdateAutonomousContainerDatabaseRequest{ AutonomousContainerDatabaseId: common.String(acdOCID), UpdateAutonomousContainerDatabaseDetails: database.UpdateAutonomousContainerDatabaseDetails{ @@ -84,7 +84,7 @@ func (d *databaseService) UpdateAutonomousContainerDatabase(acdOCID string, difA return d.dbClient.UpdateAutonomousContainerDatabase(context.TODO(), updateAutonomousContainerDatabaseRequest) } -func (d *databaseService) RestartAutonomousContainerDatabase(acdOCID string) (database.RestartAutonomousContainerDatabaseResponse, error) { +func (d *DatabaseService) RestartAutonomousContainerDatabase(acdOCID string) (database.RestartAutonomousContainerDatabaseResponse, error) { restartRequest := database.RestartAutonomousContainerDatabaseRequest{ AutonomousContainerDatabaseId: common.String(acdOCID), } @@ -92,7 +92,7 @@ func (d *databaseService) RestartAutonomousContainerDatabase(acdOCID string) (da return d.dbClient.RestartAutonomousContainerDatabase(context.TODO(), restartRequest) } -func (d *databaseService) TerminateAutonomousContainerDatabase(acdOCID string) (database.TerminateAutonomousContainerDatabaseResponse, error) { +func (d *DatabaseService) TerminateAutonomousContainerDatabase(acdOCID string) (database.TerminateAutonomousContainerDatabaseResponse, error) { terminateRequest := database.TerminateAutonomousContainerDatabaseRequest{ AutonomousContainerDatabaseId: common.String(acdOCID), } diff --git a/commons/oci/database.go b/commons/oci/database.go index 9c3cd4d8..e43afb56 100644 --- a/commons/oci/database.go +++ b/commons/oci/database.go @@ -47,38 +47,11 @@ import ( "github.com/oracle/oci-go-sdk/v65/database" "sigs.k8s.io/controller-runtime/pkg/client" - dbv1alpha1 
"github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/oracle/oracle-database-operator/commons/k8s" ) -type DatabaseService interface { - CreateAutonomousDatabase(adb *dbv1alpha1.AutonomousDatabase) (database.CreateAutonomousDatabaseResponse, error) - GetAutonomousDatabase(adbOCID string) (database.GetAutonomousDatabaseResponse, error) - UpdateAutonomousDatabaseGeneralFields(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) - UpdateAutonomousDatabaseDBWorkload(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) - UpdateAutonomousDatabaseLicenseModel(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) - UpdateAutonomousDatabaseAdminPassword(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) - UpdateAutonomousDatabaseScalingFields(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) - UpdateNetworkAccessMTLSRequired(adbOCID string) (resp database.UpdateAutonomousDatabaseResponse, err error) - UpdateNetworkAccessMTLS(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) - UpdateNetworkAccessPublic(lastAccessType dbv1alpha1.NetworkAccessTypeEnum, adbOCID string) (resp database.UpdateAutonomousDatabaseResponse, err error) - UpdateNetworkAccess(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) - StartAutonomousDatabase(adbOCID string) (database.StartAutonomousDatabaseResponse, error) - StopAutonomousDatabase(adbOCID string) (database.StopAutonomousDatabaseResponse, error) - DeleteAutonomousDatabase(adbOCID string) 
(database.DeleteAutonomousDatabaseResponse, error) - DownloadWallet(adb *dbv1alpha1.AutonomousDatabase) (database.GenerateAutonomousDatabaseWalletResponse, error) - RestoreAutonomousDatabase(adbOCID string, sdkTime common.SDKTime) (database.RestoreAutonomousDatabaseResponse, error) - ListAutonomousDatabaseBackups(adbOCID string) (database.ListAutonomousDatabaseBackupsResponse, error) - CreateAutonomousDatabaseBackup(adbBackup *dbv1alpha1.AutonomousDatabaseBackup, adbOCID string) (database.CreateAutonomousDatabaseBackupResponse, error) - GetAutonomousDatabaseBackup(backupOCID string) (database.GetAutonomousDatabaseBackupResponse, error) - CreateAutonomousContainerDatabase(acd *dbv1alpha1.AutonomousContainerDatabase) (database.CreateAutonomousContainerDatabaseResponse, error) - GetAutonomousContainerDatabase(acdOCID string) (database.GetAutonomousContainerDatabaseResponse, error) - UpdateAutonomousContainerDatabase(acdOCID string, difACD *dbv1alpha1.AutonomousContainerDatabase) (database.UpdateAutonomousContainerDatabaseResponse, error) - RestartAutonomousContainerDatabase(acdOCID string) (database.RestartAutonomousContainerDatabaseResponse, error) - TerminateAutonomousContainerDatabase(acdOCID string) (database.TerminateAutonomousContainerDatabaseResponse, error) -} - -type databaseService struct { +type DatabaseService struct { logger logr.Logger kubeClient client.Client dbClient database.DatabaseClient @@ -88,19 +61,19 @@ type databaseService struct { func NewDatabaseService( logger logr.Logger, kubeClient client.Client, - provider common.ConfigurationProvider) (DatabaseService, error) { + provider common.ConfigurationProvider) (databaseService DatabaseService, err error) { dbClient, err := database.NewDatabaseClientWithConfigurationProvider(provider) if err != nil { - return nil, err + return databaseService, err } vaultService, err := NewVaultService(logger, provider) if err != nil { - return nil, err + return databaseService, err } - return &databaseService{ + 
return DatabaseService{ logger: logger.WithName("dbService"), kubeClient: kubeClient, dbClient: dbClient, @@ -114,7 +87,7 @@ func NewDatabaseService( // ReadPassword reads the password from passwordSpec, and returns the pointer to the read password string. // The function returns a nil if nothing is read -func (d *databaseService) readPassword(namespace string, passwordSpec dbv1alpha1.PasswordSpec) (*string, error) { +func (d *DatabaseService) readPassword(namespace string, passwordSpec dbv4.PasswordSpec) (*string, error) { logger := d.logger.WithName("readPassword") if passwordSpec.K8sSecret.Name != nil { @@ -129,10 +102,10 @@ func (d *databaseService) readPassword(namespace string, passwordSpec dbv1alpha1 return common.String(password), nil } - if passwordSpec.OCISecret.OCID != nil { - logger.Info(fmt.Sprintf("Getting password from OCI Vault Secret OCID %s", *passwordSpec.OCISecret.OCID)) + if passwordSpec.OciSecret.Id != nil { + logger.Info(fmt.Sprintf("Getting password from OCI Vault Secret OCID %s", *passwordSpec.OciSecret.Id)) - password, err := d.vaultService.GetSecretValue(*passwordSpec.OCISecret.OCID) + password, err := d.vaultService.GetSecretValue(*passwordSpec.OciSecret.Id) if err != nil { return nil, err } @@ -142,14 +115,14 @@ func (d *databaseService) readPassword(namespace string, passwordSpec dbv1alpha1 return nil, nil } -func (d *databaseService) readACD_OCID(acd *dbv1alpha1.ACDSpec, namespace string) (*string, error) { - if acd.OCIACD.OCID != nil { - return acd.OCIACD.OCID, nil +func (d *DatabaseService) readACD_OCID(acd *dbv4.AcdSpec, namespace string) (*string, error) { + if acd.OciAcd.Id != nil { + return acd.OciAcd.Id, nil } - if acd.K8sACD.Name != nil { - fetchedACD := &dbv1alpha1.AutonomousContainerDatabase{} - if err := k8s.FetchResource(d.kubeClient, namespace, *acd.K8sACD.Name, fetchedACD); err != nil { + if acd.K8sAcd.Name != nil { + fetchedACD := &dbv4.AutonomousContainerDatabase{} + if err := k8s.FetchResource(d.kubeClient, namespace, 
*acd.K8sAcd.Name, fetchedACD); err != nil { return nil, err } @@ -160,7 +133,7 @@ func (d *databaseService) readACD_OCID(acd *dbv1alpha1.ACDSpec, namespace string } // CreateAutonomousDatabase sends a request to OCI to provision a database and returns the AutonomousDatabase OCID. -func (d *databaseService) CreateAutonomousDatabase(adb *dbv1alpha1.AutonomousDatabase) (resp database.CreateAutonomousDatabaseResponse, err error) { +func (d *DatabaseService) CreateAutonomousDatabase(adb *dbv4.AutonomousDatabase) (resp database.CreateAutonomousDatabaseResponse, err error) { adminPassword, err := d.readPassword(adb.Namespace, adb.Spec.Details.AdminPassword) if err != nil { return resp, err @@ -172,9 +145,12 @@ func (d *databaseService) CreateAutonomousDatabase(adb *dbv1alpha1.AutonomousDat } createAutonomousDatabaseDetails := database.CreateAutonomousDatabaseDetails{ - CompartmentId: adb.Spec.Details.CompartmentOCID, + CompartmentId: adb.Spec.Details.CompartmentId, DbName: adb.Spec.Details.DbName, - CpuCoreCount: adb.Spec.Details.CPUCoreCount, + CpuCoreCount: adb.Spec.Details.CpuCoreCount, + ComputeModel: database.CreateAutonomousDatabaseBaseComputeModelEnum(adb.Spec.Details.ComputeModel), + ComputeCount: adb.Spec.Details.ComputeCount, + OcpuCount: adb.Spec.Details.OcpuCount, DataStorageSizeInTBs: adb.Spec.Details.DataStorageSizeInTBs, AdminPassword: adminPassword, DisplayName: adb.Spec.Details.DisplayName, @@ -182,21 +158,26 @@ func (d *databaseService) CreateAutonomousDatabase(adb *dbv1alpha1.AutonomousDat IsDedicated: adb.Spec.Details.IsDedicated, AutonomousContainerDatabaseId: acdOCID, DbVersion: adb.Spec.Details.DbVersion, - DbWorkload: database.CreateAutonomousDatabaseBaseDbWorkloadEnum( - adb.Spec.Details.DbWorkload), - LicenseModel: database.CreateAutonomousDatabaseBaseLicenseModelEnum(adb.Spec.Details.LicenseModel), - IsAccessControlEnabled: adb.Spec.Details.NetworkAccess.IsAccessControlEnabled, - WhitelistedIps: adb.Spec.Details.NetworkAccess.AccessControlList, 
- IsMtlsConnectionRequired: adb.Spec.Details.NetworkAccess.IsMTLSConnectionRequired, - SubnetId: adb.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID, - NsgIds: adb.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs, - PrivateEndpointLabel: adb.Spec.Details.NetworkAccess.PrivateEndpoint.HostnamePrefix, + DbWorkload: database.CreateAutonomousDatabaseBaseDbWorkloadEnum(adb.Spec.Details.DbWorkload), + LicenseModel: database.CreateAutonomousDatabaseBaseLicenseModelEnum(adb.Spec.Details.LicenseModel), + IsFreeTier: adb.Spec.Details.IsFreeTier, + IsAccessControlEnabled: adb.Spec.Details.IsAccessControlEnabled, + WhitelistedIps: adb.Spec.Details.WhitelistedIps, + IsMtlsConnectionRequired: adb.Spec.Details.IsMtlsConnectionRequired, + SubnetId: adb.Spec.Details.SubnetId, + NsgIds: adb.Spec.Details.NsgIds, + PrivateEndpointLabel: adb.Spec.Details.PrivateEndpointLabel, FreeformTags: adb.Spec.Details.FreeformTags, } + retryPolicy := common.DefaultRetryPolicy() + createAutonomousDatabaseRequest := database.CreateAutonomousDatabaseRequest{ CreateAutonomousDatabaseDetails: createAutonomousDatabaseDetails, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } resp, err = d.dbClient.CreateAutonomousDatabase(context.TODO(), createAutonomousDatabaseRequest) @@ -207,166 +188,116 @@ func (d *databaseService) CreateAutonomousDatabase(adb *dbv1alpha1.AutonomousDat return resp, nil } -func (d *databaseService) GetAutonomousDatabase(adbOCID string) (database.GetAutonomousDatabaseResponse, error) { - getAutonomousDatabaseRequest := database.GetAutonomousDatabaseRequest{ - AutonomousDatabaseId: common.String(adbOCID), - } +func (d *DatabaseService) GetAutonomousDatabase(adbOCID string) (database.GetAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() - return d.dbClient.GetAutonomousDatabase(context.TODO(), getAutonomousDatabaseRequest) -} - -func (d *databaseService) UpdateAutonomousDatabaseGeneralFields(adbOCID string, difADB 
*dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ - AutonomousDatabaseId: common.String(adbOCID), - UpdateAutonomousDatabaseDetails: database.UpdateAutonomousDatabaseDetails{ - DisplayName: difADB.Spec.Details.DisplayName, - DbName: difADB.Spec.Details.DbName, - DbVersion: difADB.Spec.Details.DbVersion, - FreeformTags: difADB.Spec.Details.FreeformTags, - }, - } - return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) -} - -func (d *databaseService) UpdateAutonomousDatabaseDBWorkload(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ + getAutonomousDatabaseRequest := database.GetAutonomousDatabaseRequest{ AutonomousDatabaseId: common.String(adbOCID), - UpdateAutonomousDatabaseDetails: database.UpdateAutonomousDatabaseDetails{ - DbWorkload: database.UpdateAutonomousDatabaseDetailsDbWorkloadEnum(difADB.Spec.Details.DbWorkload), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, }, } - return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) -} -func (d *databaseService) UpdateAutonomousDatabaseLicenseModel(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ - AutonomousDatabaseId: common.String(adbOCID), - UpdateAutonomousDatabaseDetails: database.UpdateAutonomousDatabaseDetails{ - LicenseModel: database.UpdateAutonomousDatabaseDetailsLicenseModelEnum(difADB.Spec.Details.LicenseModel), - }, - } - return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) + return d.dbClient.GetAutonomousDatabase(context.TODO(), getAutonomousDatabaseRequest) 
} -func (d *databaseService) UpdateAutonomousDatabaseAdminPassword(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { - adminPassword, err := d.readPassword(difADB.Namespace, difADB.Spec.Details.AdminPassword) +func (d *DatabaseService) UpdateAutonomousDatabase(adbOCID string, adb *dbv4.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { + // Retrieve admin password + adminPassword, err := d.readPassword(adb.Namespace, adb.Spec.Details.AdminPassword) if err != nil { return resp, err } - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ - AutonomousDatabaseId: common.String(adbOCID), - UpdateAutonomousDatabaseDetails: database.UpdateAutonomousDatabaseDetails{ - AdminPassword: adminPassword, - }, - } - return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) -} + retryPolicy := common.DefaultRetryPolicy() -func (d *databaseService) UpdateAutonomousDatabaseScalingFields(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ AutonomousDatabaseId: common.String(adbOCID), UpdateAutonomousDatabaseDetails: database.UpdateAutonomousDatabaseDetails{ - DataStorageSizeInTBs: difADB.Spec.Details.DataStorageSizeInTBs, - CpuCoreCount: difADB.Spec.Details.CPUCoreCount, - IsAutoScalingEnabled: difADB.Spec.Details.IsAutoScalingEnabled, + DisplayName: adb.Spec.Details.DisplayName, + DbName: adb.Spec.Details.DbName, + DbVersion: adb.Spec.Details.DbVersion, + FreeformTags: adb.Spec.Details.FreeformTags, + DbWorkload: database.UpdateAutonomousDatabaseDetailsDbWorkloadEnum(adb.Spec.Details.DbWorkload), + LicenseModel: database.UpdateAutonomousDatabaseDetailsLicenseModelEnum(adb.Spec.Details.LicenseModel), + AdminPassword: adminPassword, + DataStorageSizeInTBs: 
adb.Spec.Details.DataStorageSizeInTBs, + CpuCoreCount: adb.Spec.Details.CpuCoreCount, + ComputeModel: database.UpdateAutonomousDatabaseDetailsComputeModelEnum(adb.Spec.Details.ComputeModel), + ComputeCount: adb.Spec.Details.ComputeCount, + OcpuCount: adb.Spec.Details.OcpuCount, + IsAutoScalingEnabled: adb.Spec.Details.IsAutoScalingEnabled, + IsFreeTier: adb.Spec.Details.IsFreeTier, + IsMtlsConnectionRequired: adb.Spec.Details.IsMtlsConnectionRequired, + IsAccessControlEnabled: adb.Spec.Details.IsAccessControlEnabled, + WhitelistedIps: adb.Spec.Details.WhitelistedIps, + SubnetId: adb.Spec.Details.SubnetId, + NsgIds: adb.Spec.Details.NsgIds, + PrivateEndpointLabel: adb.Spec.Details.PrivateEndpointLabel, }, - } - return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) -} - -func (d *databaseService) UpdateNetworkAccessMTLSRequired(adbOCID string) (resp database.UpdateAutonomousDatabaseResponse, err error) { - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ - AutonomousDatabaseId: common.String(adbOCID), - UpdateAutonomousDatabaseDetails: database.UpdateAutonomousDatabaseDetails{ - IsMtlsConnectionRequired: common.Bool(true), - }, - } - return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) -} - -func (d *databaseService) UpdateNetworkAccessMTLS(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ - AutonomousDatabaseId: common.String(adbOCID), - UpdateAutonomousDatabaseDetails: database.UpdateAutonomousDatabaseDetails{ - IsMtlsConnectionRequired: difADB.Spec.Details.NetworkAccess.IsMTLSConnectionRequired, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, }, } return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) } -func (d *databaseService) UpdateNetworkAccessPublic( - 
lastAccessType dbv1alpha1.NetworkAccessTypeEnum, - adbOCID string) (resp database.UpdateAutonomousDatabaseResponse, err error) { - - updateAutonomousDatabaseDetails := database.UpdateAutonomousDatabaseDetails{} +func (d *DatabaseService) StartAutonomousDatabase(adbOCID string) (database.StartAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() - if lastAccessType == dbv1alpha1.NetworkAccessTypeRestricted { - updateAutonomousDatabaseDetails.WhitelistedIps = []string{""} - } else if lastAccessType == dbv1alpha1.NetworkAccessTypePrivate { - updateAutonomousDatabaseDetails.PrivateEndpointLabel = common.String("") - } - - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ - AutonomousDatabaseId: common.String(adbOCID), - UpdateAutonomousDatabaseDetails: updateAutonomousDatabaseDetails, - } - - return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) -} - -func (d *databaseService) UpdateNetworkAccess(adbOCID string, difADB *dbv1alpha1.AutonomousDatabase) (resp database.UpdateAutonomousDatabaseResponse, err error) { - updateAutonomousDatabaseRequest := database.UpdateAutonomousDatabaseRequest{ - AutonomousDatabaseId: common.String(adbOCID), - UpdateAutonomousDatabaseDetails: database.UpdateAutonomousDatabaseDetails{ - IsAccessControlEnabled: difADB.Spec.Details.NetworkAccess.IsAccessControlEnabled, - WhitelistedIps: difADB.Spec.Details.NetworkAccess.AccessControlList, - SubnetId: difADB.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID, - NsgIds: difADB.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs, - PrivateEndpointLabel: difADB.Spec.Details.NetworkAccess.PrivateEndpoint.HostnamePrefix, - }, - } - - return d.dbClient.UpdateAutonomousDatabase(context.TODO(), updateAutonomousDatabaseRequest) -} - -func (d *databaseService) StartAutonomousDatabase(adbOCID string) (database.StartAutonomousDatabaseResponse, error) { startRequest := database.StartAutonomousDatabaseRequest{ 
AutonomousDatabaseId: common.String(adbOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } return d.dbClient.StartAutonomousDatabase(context.TODO(), startRequest) } -func (d *databaseService) StopAutonomousDatabase(adbOCID string) (database.StopAutonomousDatabaseResponse, error) { +func (d *DatabaseService) StopAutonomousDatabase(adbOCID string) (database.StopAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() + stopRequest := database.StopAutonomousDatabaseRequest{ AutonomousDatabaseId: common.String(adbOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } return d.dbClient.StopAutonomousDatabase(context.TODO(), stopRequest) } -func (d *databaseService) DeleteAutonomousDatabase(adbOCID string) (database.DeleteAutonomousDatabaseResponse, error) { +func (d *DatabaseService) DeleteAutonomousDatabase(adbOCID string) (database.DeleteAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() + deleteRequest := database.DeleteAutonomousDatabaseRequest{ AutonomousDatabaseId: common.String(adbOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } return d.dbClient.DeleteAutonomousDatabase(context.TODO(), deleteRequest) } -func (d *databaseService) DownloadWallet(adb *dbv1alpha1.AutonomousDatabase) (resp database.GenerateAutonomousDatabaseWalletResponse, err error) { +func (d *DatabaseService) DownloadWallet(adb *dbv4.AutonomousDatabase) (resp database.GenerateAutonomousDatabaseWalletResponse, err error) { // Prepare wallet password - walletPassword, err := d.readPassword(adb.Namespace, adb.Spec.Details.Wallet.Password) + walletPassword, err := d.readPassword(adb.Namespace, adb.Spec.Wallet.Password) if err != nil { return resp, err } + retryPolicy := common.DefaultRetryPolicy() + // Download a Wallet req := database.GenerateAutonomousDatabaseWalletRequest{ - AutonomousDatabaseId: adb.Spec.Details.AutonomousDatabaseOCID, + 
AutonomousDatabaseId: adb.Spec.Details.Id, GenerateAutonomousDatabaseWalletDetails: database.GenerateAutonomousDatabaseWalletDetails{ Password: walletPassword, }, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } // Send the request using the service client @@ -382,12 +313,17 @@ func (d *databaseService) DownloadWallet(adb *dbv1alpha1.AutonomousDatabase) (re * Autonomous Database Restore *******************************/ -func (d *databaseService) RestoreAutonomousDatabase(adbOCID string, sdkTime common.SDKTime) (database.RestoreAutonomousDatabaseResponse, error) { +func (d *DatabaseService) RestoreAutonomousDatabase(adbOCID string, sdkTime common.SDKTime) (database.RestoreAutonomousDatabaseResponse, error) { + retryPolicy := common.DefaultRetryPolicy() + request := database.RestoreAutonomousDatabaseRequest{ AutonomousDatabaseId: common.String(adbOCID), RestoreAutonomousDatabaseDetails: database.RestoreAutonomousDatabaseDetails{ Timestamp: &sdkTime, }, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } return d.dbClient.RestoreAutonomousDatabase(context.TODO(), request) } @@ -396,21 +332,31 @@ func (d *databaseService) RestoreAutonomousDatabase(adbOCID string, sdkTime comm * Autonomous Database Backup *******************************/ -func (d *databaseService) ListAutonomousDatabaseBackups(adbOCID string) (database.ListAutonomousDatabaseBackupsResponse, error) { +func (d *DatabaseService) ListAutonomousDatabaseBackups(adbOCID string) (database.ListAutonomousDatabaseBackupsResponse, error) { + retryPolicy := common.DefaultRetryPolicy() + listBackupRequest := database.ListAutonomousDatabaseBackupsRequest{ AutonomousDatabaseId: common.String(adbOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } return d.dbClient.ListAutonomousDatabaseBackups(context.TODO(), listBackupRequest) } -func (d *databaseService) CreateAutonomousDatabaseBackup(adbBackup 
*dbv1alpha1.AutonomousDatabaseBackup, adbOCID string) (database.CreateAutonomousDatabaseBackupResponse, error) { +func (d *DatabaseService) CreateAutonomousDatabaseBackup(adbBackup *dbv4.AutonomousDatabaseBackup, adbOCID string) (database.CreateAutonomousDatabaseBackupResponse, error) { + retryPolicy := common.DefaultRetryPolicy() + createBackupRequest := database.CreateAutonomousDatabaseBackupRequest{ CreateAutonomousDatabaseBackupDetails: database.CreateAutonomousDatabaseBackupDetails{ AutonomousDatabaseId: common.String(adbOCID), IsLongTermBackup: adbBackup.Spec.IsLongTermBackup, RetentionPeriodInDays: adbBackup.Spec.RetentionPeriodInDays, }, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } // Use the spec.displayName as the displayName of the backup if is provided, @@ -424,10 +370,63 @@ func (d *databaseService) CreateAutonomousDatabaseBackup(adbBackup *dbv1alpha1.A return d.dbClient.CreateAutonomousDatabaseBackup(context.TODO(), createBackupRequest) } -func (d *databaseService) GetAutonomousDatabaseBackup(backupOCID string) (database.GetAutonomousDatabaseBackupResponse, error) { +func (d *DatabaseService) GetAutonomousDatabaseBackup(backupOCID string) (database.GetAutonomousDatabaseBackupResponse, error) { + retryPolicy := common.DefaultRetryPolicy() + getBackupRequest := database.GetAutonomousDatabaseBackupRequest{ AutonomousDatabaseBackupId: common.String(backupOCID), + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, } return d.dbClient.GetAutonomousDatabaseBackup(context.TODO(), getBackupRequest) } + +func (d *DatabaseService) CreateAutonomousDatabaseClone(adb *dbv4.AutonomousDatabase) (resp database.CreateAutonomousDatabaseResponse, err error) { + adminPassword, err := d.readPassword(adb.Namespace, adb.Spec.Clone.AdminPassword) + if err != nil { + return resp, err + } + + acdOCID, err := d.readACD_OCID(&adb.Spec.Clone.AutonomousContainerDatabase, adb.Namespace) + if err != nil { + return resp, err + 
} + + retryPolicy := common.DefaultRetryPolicy() + request := database.CreateAutonomousDatabaseRequest{ + CreateAutonomousDatabaseDetails: database.CreateAutonomousDatabaseCloneDetails{ + CompartmentId: adb.Spec.Clone.CompartmentId, + SourceId: adb.Spec.Details.Id, + AutonomousContainerDatabaseId: acdOCID, + DisplayName: adb.Spec.Clone.DisplayName, + DbName: adb.Spec.Clone.DbName, + DbWorkload: database.CreateAutonomousDatabaseBaseDbWorkloadEnum(adb.Spec.Clone.DbWorkload), + LicenseModel: database.CreateAutonomousDatabaseBaseLicenseModelEnum(adb.Spec.Clone.LicenseModel), + DbVersion: adb.Spec.Clone.DbVersion, + DataStorageSizeInTBs: adb.Spec.Clone.DataStorageSizeInTBs, + CpuCoreCount: adb.Spec.Clone.CpuCoreCount, + ComputeModel: database.CreateAutonomousDatabaseBaseComputeModelEnum(adb.Spec.Clone.ComputeModel), + ComputeCount: adb.Spec.Clone.ComputeCount, + OcpuCount: adb.Spec.Clone.OcpuCount, + AdminPassword: adminPassword, + IsAutoScalingEnabled: adb.Spec.Clone.IsAutoScalingEnabled, + IsDedicated: adb.Spec.Clone.IsDedicated, + IsFreeTier: adb.Spec.Clone.IsFreeTier, + IsAccessControlEnabled: adb.Spec.Clone.IsAccessControlEnabled, + WhitelistedIps: adb.Spec.Clone.WhitelistedIps, + SubnetId: adb.Spec.Clone.SubnetId, + NsgIds: adb.Spec.Clone.NsgIds, + PrivateEndpointLabel: adb.Spec.Clone.PrivateEndpointLabel, + IsMtlsConnectionRequired: adb.Spec.Clone.IsMtlsConnectionRequired, + FreeformTags: adb.Spec.Clone.FreeformTags, + CloneType: adb.Spec.Clone.CloneType, + }, + RequestMetadata: common.RequestMetadata{ + RetryPolicy: &retryPolicy, + }, + } + + return d.dbClient.CreateAutonomousDatabase(context.TODO(), request) +} diff --git a/commons/oci/provider.go b/commons/oci/provider.go index 152f1efd..f466f226 100644 --- a/commons/oci/provider.go +++ b/commons/oci/provider.go @@ -60,13 +60,13 @@ const ( privatekeyKey = "privatekey" ) -type APIKeyAuth struct { +type ApiKeyAuth struct { ConfigMapName *string SecretName *string Namespace string } -func 
GetOCIProvider(kubeClient client.Client, authData APIKeyAuth) (common.ConfigurationProvider, error) { +func GetOciProvider(kubeClient client.Client, authData ApiKeyAuth) (common.ConfigurationProvider, error) { if authData.ConfigMapName != nil && authData.SecretName == nil { return getWorkloadIdentityProvider(kubeClient, authData) } else if authData.ConfigMapName != nil && authData.SecretName != nil { @@ -84,7 +84,7 @@ func GetOCIProvider(kubeClient client.Client, authData APIKeyAuth) (common.Confi } } -func getWorkloadIdentityProvider(kubeClient client.Client, authData APIKeyAuth) (common.ConfigurationProvider, error) { +func getWorkloadIdentityProvider(kubeClient client.Client, authData ApiKeyAuth) (common.ConfigurationProvider, error) { ociConfigMap, err := k8s.FetchConfigMap(kubeClient, authData.Namespace, *authData.ConfigMapName) if err != nil { return nil, err @@ -108,7 +108,7 @@ func getWorkloadIdentityProvider(kubeClient client.Client, authData APIKeyAuth) return auth.OkeWorkloadIdentityConfigurationProvider() } -func getProviderWithAPIKey(kubeClient client.Client, authData APIKeyAuth) (common.ConfigurationProvider, error) { +func getProviderWithAPIKey(kubeClient client.Client, authData ApiKeyAuth) (common.ConfigurationProvider, error) { var region, fingerprint, user, tenancy, passphrase, privatekeyValue string // Prepare ConfigMap diff --git a/commons/sharding/catalog.go b/commons/sharding/catalog.go index 58f07490..646c89b8 100644 --- a/commons/sharding/catalog.go +++ b/commons/sharding/catalog.go @@ -44,7 +44,7 @@ import ( "strconv" "github.com/go-logr/logr" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -53,7 +53,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func buildLabelsForCatalog(instance *databasev1alpha1.ShardingDatabase, 
label string) map[string]string { +func buildLabelsForCatalog(instance *databasev4.ShardingDatabase, label string, catalogName string) map[string]string { return map[string]string{ "app": "OracleSharding", "type": "Catalog", @@ -61,7 +61,7 @@ func buildLabelsForCatalog(instance *databasev1alpha1.ShardingDatabase, label st } } -func getLabelForCatalog(instance *databasev1alpha1.ShardingDatabase) string { +func getLabelForCatalog(instance *databasev4.ShardingDatabase) string { // if len(OraCatalogSpex.Label) !=0 { // return OraCatalogSpex.Label @@ -70,7 +70,7 @@ func getLabelForCatalog(instance *databasev1alpha1.ShardingDatabase) string { return instance.Name } -func BuildStatefulSetForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) *appsv1.StatefulSet { +func BuildStatefulSetForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) *appsv1.StatefulSet { sfset := &appsv1.StatefulSet{ TypeMeta: buildTypeMetaForCatalog(), ObjectMeta: builObjectMetaForCatalog(instance, OraCatalogSpex), @@ -91,29 +91,29 @@ func buildTypeMetaForCatalog() metav1.TypeMeta { } // Function to build ObjectMeta -func builObjectMetaForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) metav1.ObjectMeta { +func builObjectMetaForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) metav1.ObjectMeta { // building objectMeta objmeta := metav1.ObjectMeta{ Name: OraCatalogSpex.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForCatalog(instance, "sharding"), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), } return objmeta } // Function to build Stateful Specs -func buildStatefulSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) *appsv1.StatefulSetSpec { +func 
buildStatefulSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) *appsv1.StatefulSetSpec { // building Stateful set Specs sfsetspec := &appsv1.StatefulSetSpec{ ServiceName: OraCatalogSpex.Name, Selector: &metav1.LabelSelector{ - MatchLabels: buildLabelsForCatalog(instance, "sharding"), + MatchLabels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: buildLabelsForCatalog(instance, "sharding"), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), }, Spec: *buildPodSpecForCatalog(instance, OraCatalogSpex), }, @@ -131,7 +131,7 @@ func buildStatefulSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, Or // Function to build PodSpec -func buildPodSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) *corev1.PodSpec { +func buildPodSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) *corev1.PodSpec { user := oraRunAsUser group := oraFsGroup @@ -166,7 +166,7 @@ func buildPodSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCata } // Function to build Volume Spec -func buildVolumeSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.Volume { +func buildVolumeSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.Volume { var result []corev1.Volume result = []corev1.Volume{ { @@ -207,7 +207,7 @@ func buildVolumeSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraC } // Function to build the container Specification -func buildContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.Container { +func buildContainerSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.Container { // 
building Continer spec var result []corev1.Container containerSpec := corev1.Container{ @@ -284,7 +284,7 @@ func buildContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, O } // Function to build the init Container Spec -func buildInitContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.Container { +func buildInitContainerSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.Container { var result []corev1.Container // building the init Container Spec privFlag := true @@ -320,7 +320,7 @@ func buildInitContainerSpecForCatalog(instance *databasev1alpha1.ShardingDatabas return result } -func buildVolumeMountSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.VolumeMount { +func buildVolumeMountSpecForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.VolumeMount { var result []corev1.VolumeMount result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "secretmap-vol3", MountPath: oraSecretMount, ReadOnly: true}) result = append(result, corev1.VolumeMount{Name: OraCatalogSpex.Name + "-oradata-vol4", MountPath: oraDataMount}) @@ -345,7 +345,7 @@ func buildVolumeMountSpecForCatalog(instance *databasev1alpha1.ShardingDatabase, return result } -func volumeClaimTemplatesForCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec) []corev1.PersistentVolumeClaim { +func volumeClaimTemplatesForCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec) []corev1.PersistentVolumeClaim { var claims []corev1.PersistentVolumeClaim @@ -357,9 +357,9 @@ func volumeClaimTemplatesForCatalog(instance *databasev1alpha1.ShardingDatabase, { ObjectMeta: metav1.ObjectMeta{ Name: OraCatalogSpex.Name + "-oradata-vol4", - Namespace: instance.Spec.Namespace, + Namespace: 
instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForCatalog(instance, "sharding"), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ @@ -392,9 +392,9 @@ func volumeClaimTemplatesForCatalog(instance *databasev1alpha1.ShardingDatabase, pvcClaim := corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: instance.Name + "shared-storage", - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForCatalog(instance, "sharding"), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ @@ -417,7 +417,7 @@ func volumeClaimTemplatesForCatalog(instance *databasev1alpha1.ShardingDatabase, return claims } -func BuildServiceDefForCatalog(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraCatalogSpex databasev1alpha1.CatalogSpec, svctype string) *corev1.Service { +func BuildServiceDefForCatalog(instance *databasev4.ShardingDatabase, replicaCount int32, OraCatalogSpex databasev4.CatalogSpec, svctype string) *corev1.Service { //service := &corev1.Service{} service := &corev1.Service{ ObjectMeta: buildSvcObjectMetaForCatalog(instance, replicaCount, OraCatalogSpex, svctype), @@ -432,7 +432,7 @@ func BuildServiceDefForCatalog(instance *databasev1alpha1.ShardingDatabase, repl if svctype == "local" { service.Spec.ClusterIP = corev1.ClusterIPNone - service.Spec.Selector = buildLabelsForCatalog(instance, "sharding") + service.Spec.Selector = getSvcLabelsForCatalog(replicaCount, OraCatalogSpex) } // build Service Ports Specs to be exposed. If the PortMappings is not set then default ports will be exposed. 
@@ -441,7 +441,7 @@ func BuildServiceDefForCatalog(instance *databasev1alpha1.ShardingDatabase, repl } // Function to build Service ObjectMeta -func buildSvcObjectMetaForCatalog(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraCatalogSpex databasev1alpha1.CatalogSpec, svctype string) metav1.ObjectMeta { +func buildSvcObjectMetaForCatalog(instance *databasev4.ShardingDatabase, replicaCount int32, OraCatalogSpex databasev4.CatalogSpec, svctype string) metav1.ObjectMeta { // building objectMeta var svcName string if svctype == "local" { @@ -454,14 +454,14 @@ func buildSvcObjectMetaForCatalog(instance *databasev1alpha1.ShardingDatabase, r objmeta := metav1.ObjectMeta{ Name: svcName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForCatalog(instance, "sharding"), + Labels: buildLabelsForCatalog(instance, "sharding", OraCatalogSpex.Name), } return objmeta } -func getSvcLabelsForCatalog(replicaCount int32, OraCatalogSpex databasev1alpha1.CatalogSpec) map[string]string { +func getSvcLabelsForCatalog(replicaCount int32, OraCatalogSpex databasev4.CatalogSpec) map[string]string { var labelStr map[string]string = make(map[string]string) if replicaCount == -1 { @@ -475,8 +475,8 @@ func getSvcLabelsForCatalog(replicaCount int32, OraCatalogSpex databasev1alpha1. 
} // ======================== update Section ======================== -func UpdateProvForCatalog(instance *databasev1alpha1.ShardingDatabase, - OraCatalogSpex databasev1alpha1.CatalogSpec, kClient client.Client, sfSet *appsv1.StatefulSet, catalogPod *corev1.Pod, logger logr.Logger, +func UpdateProvForCatalog(instance *databasev4.ShardingDatabase, + OraCatalogSpex databasev4.CatalogSpec, kClient client.Client, sfSet *appsv1.StatefulSet, catalogPod *corev1.Pod, logger logr.Logger, ) (ctrl.Result, error) { var isUpdate bool = false @@ -485,7 +485,7 @@ func UpdateProvForCatalog(instance *databasev1alpha1.ShardingDatabase, var msg string //msg = "Inside the updateProvForCatalog" - //reqLogger := r.Log.WithValues("Instance.Namespace", instance.Spec.Namespace, "Instance.Name", instance.Name) + //reqLogger := r.Log.WithValues("Instance.Namespace", instance.Namespace, "Instance.Name", instance.Name) LogMessages("DEBUG", msg, nil, instance, logger) // Memory Check diff --git a/commons/sharding/exec.go b/commons/sharding/exec.go index 44f91e51..00caa995 100644 --- a/commons/sharding/exec.go +++ b/commons/sharding/exec.go @@ -44,7 +44,7 @@ import ( "net/http" "time" - databasealphav1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -61,9 +61,9 @@ import ( ) // ExecCMDInContainer execute command in first container of a pod -func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasealphav1.ShardingDatabase, logger logr.Logger) (string, string, error) { +func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasev4.ShardingDatabase, logger logr.Logger) (string, string, error) { - var err1 error = nil + var err1 error = nil var msg string var ( execOut bytes.Buffer @@ -71,28 +71,28 @@ 
func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, ) for i := 0; i < 5; i++ { - if scheme.Scheme == nil { - time.Sleep(time.Second * 40) - } else { - break - } - } + if scheme.Scheme == nil { + time.Sleep(time.Second * 40) + } else { + break + } + } if kubeClient == nil { - msg = "ExecCommand() : kubeClient is nil" - err1 = fmt.Errorf(msg) - return "Error:","kubeClient is nil",err1 - } + msg = "ExecCommand() : kubeClient is nil" + err1 = fmt.Errorf(msg) + return "Error:", "kubeClient is nil", err1 + } if kubeConfig == nil { - msg = "ExecCommand() : kubeConfig is nil" - err1 = fmt.Errorf(msg) - return "Error:","kubeConfig is nil",err1 + msg = "ExecCommand() : kubeConfig is nil" + err1 = fmt.Errorf(msg) + return "Error:", "kubeConfig is nil", err1 } msg = "" req := kubeClient.CoreV1().RESTClient(). Post(). - Namespace(instance.Spec.Namespace). + Namespace(instance.Namespace). Resource("pods"). Name(podName). SubResource("exec"). @@ -105,6 +105,8 @@ func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, config, err := kubeConfig.ClientConfig() if err != nil { + msg = "Error after executing kubeConfig.ClientConfig" + LogMessages("Error", msg, err, instance, logger) return "Error Occurred", "Error Occurred", err } @@ -136,7 +138,7 @@ func ExecCommand(podName string, cmd []string, kubeClient kubernetes.Interface, return execOut.String(), execErr.String(), nil } -func GetPodCopyConfig(kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasealphav1.ShardingDatabase, logger logr.Logger) (*rest.Config, *kubernetes.Clientset, error) { +func GetPodCopyConfig(kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasev4.ShardingDatabase, logger logr.Logger) (*rest.Config, *kubernetes.Clientset, error) { var clientSet *kubernetes.Clientset config, err := kubeConfig.ClientConfig() @@ -152,7 +154,7 @@ func GetPodCopyConfig(kubeClient kubernetes.Interface, kubeConfig 
clientcmd.Clie } -func KctlCopyFile(kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasealphav1.ShardingDatabase, restConfig *rest.Config, kclientset *kubernetes.Clientset, logger logr.Logger, src string, dst string, containername string) (*bytes.Buffer, *bytes.Buffer, *bytes.Buffer, error) { +func KctlCopyFile(kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, instance *databasev4.ShardingDatabase, restConfig *rest.Config, kclientset *kubernetes.Clientset, logger logr.Logger, src string, dst string, containername string) (*bytes.Buffer, *bytes.Buffer, *bytes.Buffer, error) { var in, out, errOut *bytes.Buffer var ioStreams genericclioptions.IOStreams diff --git a/commons/sharding/gsm.go b/commons/sharding/gsm.go index bcdc8866..e6be8770 100644 --- a/commons/sharding/gsm.go +++ b/commons/sharding/gsm.go @@ -44,7 +44,7 @@ import ( "reflect" "strconv" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" @@ -56,7 +56,7 @@ import ( ) // Constants for hello-stateful StatefulSet & Volumes -func buildLabelsForGsm(instance *databasev1alpha1.ShardingDatabase, label string) map[string]string { +func buildLabelsForGsm(instance *databasev4.ShardingDatabase, label string, gsmName string) map[string]string { return map[string]string{ "app": "OracleGsming", "shard_name": "Gsm", @@ -64,7 +64,7 @@ func buildLabelsForGsm(instance *databasev1alpha1.ShardingDatabase, label string } } -func getLabelForGsm(instance *databasev1alpha1.ShardingDatabase) string { +func getLabelForGsm(instance *databasev4.ShardingDatabase) string { // if len(OraGsmSpex.Label) !=0 { // return OraGsmSpex.Label @@ -73,7 +73,7 @@ func getLabelForGsm(instance *databasev1alpha1.ShardingDatabase) string { return instance.Name } -func BuildStatefulSetForGsm(instance *databasev1alpha1.ShardingDatabase, 
OraGsmSpex databasev1alpha1.GsmSpec) *appsv1.StatefulSet { +func BuildStatefulSetForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) *appsv1.StatefulSet { sfset := &appsv1.StatefulSet{ TypeMeta: buildTypeMetaForGsm(), ObjectMeta: builObjectMetaForGsm(instance, OraGsmSpex), @@ -93,29 +93,29 @@ func buildTypeMetaForGsm() metav1.TypeMeta { } // Function to build ObjectMeta -func builObjectMetaForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) metav1.ObjectMeta { +func builObjectMetaForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) metav1.ObjectMeta { // building objectMeta objmeta := metav1.ObjectMeta{ Name: OraGsmSpex.Name, - Namespace: instance.Spec.Namespace, - Labels: buildLabelsForGsm(instance, "sharding"), + Namespace: instance.Namespace, + Labels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), OwnerReferences: getOwnerRef(instance), } return objmeta } // Function to build Stateful Specs -func buildStatefulSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) *appsv1.StatefulSetSpec { +func buildStatefulSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) *appsv1.StatefulSetSpec { // building Stateful set Specs sfsetspec := &appsv1.StatefulSetSpec{ ServiceName: OraGsmSpex.Name, Selector: &metav1.LabelSelector{ - MatchLabels: buildLabelsForGsm(instance, "sharding"), + MatchLabels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: buildLabelsForGsm(instance, "sharding"), + Labels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), }, Spec: *buildPodSpecForGsm(instance, OraGsmSpex), }, @@ -136,7 +136,7 @@ func buildStatefulSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsm // Function to build PodSpec -func buildPodSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex 
databasev1alpha1.GsmSpec) *corev1.PodSpec { +func buildPodSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) *corev1.PodSpec { user := oraRunAsUser group := oraFsGroup @@ -170,7 +170,7 @@ func buildPodSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex } // Function to build Volume Spec -func buildVolumeSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) []corev1.Volume { +func buildVolumeSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.Volume { var result []corev1.Volume result = []corev1.Volume{ { @@ -204,7 +204,7 @@ func buildVolumeSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSp } // Function to build the container Specification -func buildContainerSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) []corev1.Container { +func buildContainerSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.Container { // building Continer spec var result []corev1.Container var masterGsmFlag = false @@ -272,7 +272,7 @@ func buildContainerSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGs } // Function to build the init Container Spec -func buildInitContainerSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) []corev1.Container { +func buildInitContainerSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.Container { var result []corev1.Container // building the init Container Spec privFlag := true @@ -309,7 +309,7 @@ func buildInitContainerSpecForGsm(instance *databasev1alpha1.ShardingDatabase, O return result } -func buildVolumeMountSpecForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) []corev1.VolumeMount { +func buildVolumeMountSpecForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.VolumeMount { 
var result []corev1.VolumeMount result = append(result, corev1.VolumeMount{Name: OraGsmSpex.Name + "secretmap-vol3", MountPath: oraSecretMount, ReadOnly: true}) result = append(result, corev1.VolumeMount{Name: OraGsmSpex.Name + "-oradata-vol4", MountPath: oraGsmDataMount}) @@ -325,7 +325,7 @@ func buildVolumeMountSpecForGsm(instance *databasev1alpha1.ShardingDatabase, Ora return result } -func volumeClaimTemplatesForGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec) []corev1.PersistentVolumeClaim { +func volumeClaimTemplatesForGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec) []corev1.PersistentVolumeClaim { var claims []corev1.PersistentVolumeClaim @@ -337,8 +337,8 @@ func volumeClaimTemplatesForGsm(instance *databasev1alpha1.ShardingDatabase, Ora { ObjectMeta: metav1.ObjectMeta{ Name: OraGsmSpex.Name + "-oradata-vol4", - Namespace: instance.Spec.Namespace, - Labels: buildLabelsForGsm(instance, "sharding"), + Namespace: instance.Namespace, + Labels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), OwnerReferences: getOwnerRef(instance), }, Spec: corev1.PersistentVolumeClaimSpec{ @@ -354,6 +354,14 @@ func volumeClaimTemplatesForGsm(instance *databasev1alpha1.ShardingDatabase, Ora }, }, } + + if len(OraGsmSpex.PvAnnotations) > 0 { + claims[0].ObjectMeta.Annotations = make(map[string]string) + for key, value := range OraGsmSpex.PvAnnotations { + claims[0].ObjectMeta.Annotations[key] = value + } + } + if len(OraGsmSpex.PvMatchLabels) > 0 { claims[0].Spec.Selector = &metav1.LabelSelector{MatchLabels: OraGsmSpex.PvMatchLabels} } @@ -361,7 +369,7 @@ func volumeClaimTemplatesForGsm(instance *databasev1alpha1.ShardingDatabase, Ora return claims } -func BuildServiceDefForGsm(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraGsmSpex databasev1alpha1.GsmSpec, svctype string) *corev1.Service { +func BuildServiceDefForGsm(instance *databasev4.ShardingDatabase, replicaCount int32, OraGsmSpex 
databasev4.GsmSpec, svctype string) *corev1.Service { //service := &corev1.Service{} service := &corev1.Service{ ObjectMeta: buildSvcObjectMetaForGsm(instance, replicaCount, OraGsmSpex, svctype), @@ -376,7 +384,7 @@ func BuildServiceDefForGsm(instance *databasev1alpha1.ShardingDatabase, replicaC if svctype == "local" { service.Spec.ClusterIP = corev1.ClusterIPNone - service.Spec.Selector = buildLabelsForGsm(instance, "sharding") + service.Spec.Selector = getSvcLabelsForGsm(replicaCount, OraGsmSpex) } // build Service Ports Specs to be exposed. If the PortMappings is not set then default ports will be exposed. @@ -385,7 +393,7 @@ func BuildServiceDefForGsm(instance *databasev1alpha1.ShardingDatabase, replicaC } // Function to build Service ObjectMeta -func buildSvcObjectMetaForGsm(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraGsmSpex databasev1alpha1.GsmSpec, svctype string) metav1.ObjectMeta { +func buildSvcObjectMetaForGsm(instance *databasev4.ShardingDatabase, replicaCount int32, OraGsmSpex databasev4.GsmSpec, svctype string) metav1.ObjectMeta { // building objectMeta var svcName string if svctype == "local" { @@ -398,14 +406,14 @@ func buildSvcObjectMetaForGsm(instance *databasev1alpha1.ShardingDatabase, repli objmeta := metav1.ObjectMeta{ Name: svcName, - Namespace: instance.Spec.Namespace, - Labels: buildLabelsForGsm(instance, "sharding"), + Namespace: instance.Namespace, + Labels: buildLabelsForGsm(instance, "sharding", OraGsmSpex.Name), OwnerReferences: getOwnerRef(instance), } return objmeta } -func getSvcLabelsForGsm(replicaCount int32, OraGsmSpex databasev1alpha1.GsmSpec) map[string]string { +func getSvcLabelsForGsm(replicaCount int32, OraGsmSpex databasev4.GsmSpec) map[string]string { var labelStr map[string]string = make(map[string]string) if replicaCount == -1 { @@ -419,8 +427,8 @@ func getSvcLabelsForGsm(replicaCount int32, OraGsmSpex databasev1alpha1.GsmSpec) } // This function cleanup the shard from GSM -func 
OraCleanupForGsm(instance *databasev1alpha1.ShardingDatabase, - OraGsmSpex databasev1alpha1.GsmSpec, +func OraCleanupForGsm(instance *databasev4.ShardingDatabase, + OraGsmSpex databasev4.GsmSpec, oldReplicaSize int32, newReplicaSize int32, ) string { @@ -435,8 +443,8 @@ func OraCleanupForGsm(instance *databasev1alpha1.ShardingDatabase, return err1 } -func UpdateProvForGsm(instance *databasev1alpha1.ShardingDatabase, - OraGsmSpex databasev1alpha1.GsmSpec, kClient client.Client, sfSet *appsv1.StatefulSet, gsmPod *corev1.Pod, logger logr.Logger, +func UpdateProvForGsm(instance *databasev4.ShardingDatabase, + OraGsmSpex databasev4.GsmSpec, kClient client.Client, sfSet *appsv1.StatefulSet, gsmPod *corev1.Pod, logger logr.Logger, ) (ctrl.Result, error) { var msg string diff --git a/commons/sharding/provstatus.go b/commons/sharding/provstatus.go index 87796553..44544c60 100644 --- a/commons/sharding/provstatus.go +++ b/commons/sharding/provstatus.go @@ -42,7 +42,7 @@ import ( "fmt" "strconv" - databasealphav1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -52,7 +52,7 @@ import ( ) // CHeck if record exist in a struct -func CheckGsmStatusInst(instSpex []databasealphav1.GsmStatusDetails, name string, +func CheckGsmStatusInst(instSpex []databasev4.GsmStatusDetails, name string, ) (int, bool) { var status bool = false @@ -69,9 +69,9 @@ func CheckGsmStatusInst(instSpex []databasealphav1.GsmStatusDetails, name string return idx, status } -func UpdateGsmStatusData(instance *databasealphav1.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, +func UpdateGsmStatusData(instance *databasev4.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, ) { - if state 
== string(databasealphav1.AvailableState) { + if state == string(databasev4.AvailableState) { // Evaluate following values only if state is set to available svcName := instance.Spec.Gsm[Specidx].Name + "-0." + instance.Spec.Gsm[Specidx].Name k8sExternalSvcName := svcName + strconv.FormatInt(int64(0), 10) + "-svc." + getInstanceNs(instance) + ".svc.cluster.local" @@ -85,40 +85,40 @@ func UpdateGsmStatusData(instance *databasealphav1.ShardingDatabase, Specidx int // internIp := strings.Replace(K8sExternalSvcIP, "/r/n", "", -1) // Populate the Maps - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Name), instance.Spec.Gsm[Specidx].Name) - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.DbPasswordSecret), DbPasswordSecret) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Name), instance.Spec.Gsm[Specidx].Name) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.DbPasswordSecret), DbPasswordSecret) if instance.Spec.IsExternalSvc == true { - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvc), k8sExternalSvcName) - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvcIP), K8sExternalSvcIP) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvc), k8sExternalSvcName) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvcIP), K8sExternalSvcIP) } - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvc), K8sInternalSvcName) - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvcIP), K8sInternalSvcIP) - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.State), state) - } else if state == 
string(databasealphav1.Terminated) { - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Name)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Role)) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvc), K8sInternalSvcName) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvcIP), K8sInternalSvcIP) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.State), state) + } else if state == string(databasev4.Terminated) { + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Name)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Role)) instance.Status.Gsm.Services = "" } else { - insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Name), instance.Spec.Gsm[Specidx].Name) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, 
string(databasealphav1.K8sExternalSvcIP)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasealphav1.Role)) + insertOrUpdateGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Name), instance.Spec.Gsm[Specidx].Name) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeGsmKeys(instance, instance.Spec.Gsm[Specidx].Name, string(databasev4.Role)) instance.Status.Gsm.Services = "" } } -func UpdateCatalogStatusData(instance *databasealphav1.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, +func UpdateCatalogStatusData(instance *databasev4.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, ) { mode := GetDbOpenMode(instance.Spec.Catalog[Specidx].Name+"-0", instance, kubeClient, kubeConfig, logger) - if state == string(databasealphav1.AvailableState) { + if state == string(databasev4.AvailableState) { // Evaluate following values only if state is set to available svcName := instance.Spec.Catalog[Specidx].Name + "-0." + instance.Spec.Catalog[Specidx].Name k8sExternalSvcName := svcName + strconv.FormatInt(int64(0), 10) + "-svc." 
+ getInstanceNs(instance) + ".svc.cluster.local" @@ -133,52 +133,52 @@ func UpdateCatalogStatusData(instance *databasealphav1.ShardingDatabase, Specidx // internIp := strings.Replace(K8sExternalSvcIP, "/r/n", "", -1) // Populate the Maps - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Name), instance.Spec.Catalog[Specidx].Name) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.DbPasswordSecret), DbPasswordSecret) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Name), instance.Spec.Catalog[Specidx].Name) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.DbPasswordSecret), DbPasswordSecret) if instance.Spec.IsExternalSvc == true { - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvc), k8sExternalSvcName) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvcIP), K8sExternalSvcIP) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvc), k8sExternalSvcName) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvcIP), K8sExternalSvcIP) } - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvc), K8sInternalSvcName) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvcIP), K8sInternalSvcIP) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.State), state) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OracleSid), oracleSid) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OraclePdb), oraclePdb) - 
insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role), role) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OpenMode), mode) - } else if state == string(databasealphav1.Terminated) { - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.State)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Name)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OraclePdb)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OracleSid)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OpenMode)) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvc), K8sInternalSvcName) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvcIP), K8sInternalSvcIP) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.State), state) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OracleSid), oracleSid) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OraclePdb), 
oraclePdb) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role), role) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OpenMode), mode) + } else if state == string(databasev4.Terminated) { + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.State)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Name)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OraclePdb)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OracleSid)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OpenMode)) } else { - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.State), state) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Name), instance.Spec.Catalog[Specidx].Name) - insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OpenMode), mode) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeCatalogKeys(instance, 
instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OraclePdb)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.OracleSid)) - removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasealphav1.Role)) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.State), state) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Name), instance.Spec.Catalog[Specidx].Name) + insertOrUpdateCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OpenMode), mode) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OraclePdb)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.OracleSid)) + removeCatalogKeys(instance, instance.Spec.Catalog[Specidx].Name, string(databasev4.Role)) } } -func UpdateShardStatusData(instance *databasealphav1.ShardingDatabase, Specidx int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, +func UpdateShardStatusData(instance *databasev4.ShardingDatabase, Specidx 
int, state string, kubeClient kubernetes.Interface, kubeConfig clientcmd.ClientConfig, logger logr.Logger, ) { mode := GetDbOpenMode(instance.Spec.Shard[Specidx].Name+"-0", instance, kubeClient, kubeConfig, logger) - if state == string(databasealphav1.AvailableState) { + if state == string(databasev4.AvailableState) { // Evaluate following values only if state is set to available svcName := instance.Spec.Shard[Specidx].Name + "-0." + instance.Spec.Shard[Specidx].Name k8sExternalSvcName := svcName + strconv.FormatInt(int64(0), 10) + "-svc." + getInstanceNs(instance) + ".svc.cluster.local" @@ -191,49 +191,49 @@ func UpdateShardStatusData(instance *databasealphav1.ShardingDatabase, Specidx i role := GetDbRole(instance.Spec.Shard[Specidx].Name+"-0", instance, kubeClient, kubeConfig, logger) // Populate the Maps - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Name), instance.Spec.Shard[Specidx].Name) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.DbPasswordSecret), DbPasswordSecret) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Name), instance.Spec.Shard[Specidx].Name) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.DbPasswordSecret), DbPasswordSecret) if instance.Spec.IsExternalSvc == true { - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvc), k8sExternalSvcName) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvcIP), K8sExternalSvcIP) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvc), k8sExternalSvcName) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvcIP), K8sExternalSvcIP) } - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, 
string(databasealphav1.K8sInternalSvc), K8sInternalSvcName) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvcIP), K8sInternalSvcIP) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.State), state) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OracleSid), oracleSid) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OraclePdb), oraclePdb) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role), role) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OpenMode), mode) - } else if state == string(databasealphav1.Terminated) { - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.State)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Name)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OraclePdb)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OracleSid)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OpenMode)) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvc), 
K8sInternalSvcName) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvcIP), K8sInternalSvcIP) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.State), state) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OracleSid), oracleSid) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OraclePdb), oraclePdb) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Role), role) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OpenMode), mode) + } else if state == string(databasev4.Terminated) { + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.State)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Name)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Role)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OraclePdb)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OracleSid)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Role)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OpenMode)) } else { - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.State), state) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Name), 
instance.Spec.Shard[Specidx].Name) - insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OpenMode), mode) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvc)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvc)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sExternalSvcIP)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.K8sInternalSvcIP)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OraclePdb)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.OracleSid)) - removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasealphav1.Role)) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.State), state) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Name), instance.Spec.Shard[Specidx].Name) + insertOrUpdateShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OpenMode), mode) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvc)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvc)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sExternalSvcIP)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.K8sInternalSvcIP)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.Role)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OraclePdb)) + removeShardKeys(instance, instance.Spec.Shard[Specidx].Name, string(databasev4.OracleSid)) + removeShardKeys(instance, 
instance.Spec.Shard[Specidx].Name, string(databasev4.Role)) } } -func insertOrUpdateShardKeys(instance *databasealphav1.ShardingDatabase, name string, key string, value string) { +func insertOrUpdateShardKeys(instance *databasev4.ShardingDatabase, name string, key string, value string) { newKey := name + "_" + key if len(instance.Status.Shard) > 0 { if _, ok := instance.Status.Shard[newKey]; ok { @@ -248,7 +248,7 @@ func insertOrUpdateShardKeys(instance *databasealphav1.ShardingDatabase, name st } -func removeShardKeys(instance *databasealphav1.ShardingDatabase, name string, key string) { +func removeShardKeys(instance *databasev4.ShardingDatabase, name string, key string) { newKey := name + "_" + key if len(instance.Status.Shard) > 0 { if _, ok := instance.Status.Shard[newKey]; ok { @@ -258,7 +258,7 @@ func removeShardKeys(instance *databasealphav1.ShardingDatabase, name string, ke } } -func insertOrUpdateCatalogKeys(instance *databasealphav1.ShardingDatabase, name string, key string, value string) { +func insertOrUpdateCatalogKeys(instance *databasev4.ShardingDatabase, name string, key string, value string) { newKey := name + "_" + key if len(instance.Status.Catalog) > 0 { if _, ok := instance.Status.Catalog[newKey]; ok { @@ -273,7 +273,7 @@ func insertOrUpdateCatalogKeys(instance *databasealphav1.ShardingDatabase, name } -func removeCatalogKeys(instance *databasealphav1.ShardingDatabase, name string, key string) { +func removeCatalogKeys(instance *databasev4.ShardingDatabase, name string, key string) { newKey := name + "_" + key if len(instance.Status.Catalog) > 0 { if _, ok := instance.Status.Catalog[newKey]; ok { @@ -283,7 +283,7 @@ func removeCatalogKeys(instance *databasealphav1.ShardingDatabase, name string, } } -func insertOrUpdateGsmKeys(instance *databasealphav1.ShardingDatabase, name string, key string, value string) { +func insertOrUpdateGsmKeys(instance *databasev4.ShardingDatabase, name string, key string, value string) { newKey := name + "_" + key 
if len(instance.Status.Gsm.Details) > 0 { if _, ok := instance.Status.Gsm.Details[newKey]; ok { @@ -298,7 +298,7 @@ func insertOrUpdateGsmKeys(instance *databasealphav1.ShardingDatabase, name stri } -func removeGsmKeys(instance *databasealphav1.ShardingDatabase, name string, key string) { +func removeGsmKeys(instance *databasev4.ShardingDatabase, name string, key string) { newKey := name + "_" + key if len(instance.Status.Gsm.Details) > 0 { if _, ok := instance.Status.Gsm.Details[newKey]; ok { @@ -308,18 +308,18 @@ func removeGsmKeys(instance *databasealphav1.ShardingDatabase, name string, key } } -func getInstanceNs(instance *databasealphav1.ShardingDatabase) string { +func getInstanceNs(instance *databasev4.ShardingDatabase) string { var namespace string - if instance.Spec.Namespace == "" { + if instance.Namespace == "" { namespace = "default" } else { - namespace = instance.Spec.Namespace + namespace = instance.Namespace } return namespace } // File the meta condition and return the meta view -func GetMetaCondition(instance *databasealphav1.ShardingDatabase, result *ctrl.Result, err *error, stateType string, stateMsg string) metav1.Condition { +func GetMetaCondition(instance *databasev4.ShardingDatabase, result *ctrl.Result, err *error, stateType string, stateMsg string) metav1.Condition { return metav1.Condition{ Type: stateType, @@ -332,7 +332,7 @@ func GetMetaCondition(instance *databasealphav1.ShardingDatabase, result *ctrl.R } // ======================= CHeck GSM Director Status ============== -func CheckGsmStatus(gname string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func CheckGsmStatus(gname string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { var err error var msg string = "Inside the checkGsmStatus. Checking GSM director in " + GetFmtStr(gname) + " pod." 
@@ -349,17 +349,18 @@ func CheckGsmStatus(gname string, instance *databasealphav1.ShardingDatabase, ku // ============ Functiont o check the status of the Shard and catalog ========= // ================================ Validate shard =========================== -func ValidateDbSetup(podName string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func ValidateDbSetup(podName string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { _, _, err := ExecCommand(podName, shardValidationCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { + return fmt.Errorf("error ocurred while validating the DB Setup") } return nil } -func UpdateGsmShardStatus(instance *databasealphav1.ShardingDatabase, name string, state string) { +func UpdateGsmShardStatus(instance *databasev4.ShardingDatabase, name string, state string) { //smap := make(map[string]string) if _, ok := instance.Status.Gsm.Shards[name]; ok { instance.Status.Gsm.Shards[name] = state @@ -383,7 +384,7 @@ func UpdateGsmShardStatus(instance *databasealphav1.ShardingDatabase, name strin } -func GetGsmShardStatus(instance *databasealphav1.ShardingDatabase, name string) string { +func GetGsmShardStatus(instance *databasev4.ShardingDatabase, name string) string { if _, ok := instance.Status.Gsm.Shards[name]; ok { return instance.Status.Gsm.Shards[name] @@ -392,7 +393,7 @@ func GetGsmShardStatus(instance *databasealphav1.ShardingDatabase, name string) } -func GetGsmShardStatusKey(instance *databasealphav1.ShardingDatabase, key string) string { +func GetGsmShardStatusKey(instance *databasev4.ShardingDatabase, key string) string { if _, ok := instance.Status.Shard[key]; ok { return instance.Status.Shard[key] @@ -401,7 +402,7 @@ func GetGsmShardStatusKey(instance *databasealphav1.ShardingDatabase, key string } -func GetGsmCatalogStatusKey(instance 
*databasealphav1.ShardingDatabase, key string) string { +func GetGsmCatalogStatusKey(instance *databasev4.ShardingDatabase, key string) string { if _, ok := instance.Status.Catalog[key]; ok { return instance.Status.Catalog[key] @@ -410,7 +411,7 @@ func GetGsmCatalogStatusKey(instance *databasealphav1.ShardingDatabase, key stri } -func GetGsmDetailsSttausKey(instance *databasealphav1.ShardingDatabase, key string) string { +func GetGsmDetailsSttausKey(instance *databasev4.ShardingDatabase, key string) string { if _, ok := instance.Status.Gsm.Details[key]; ok { return instance.Status.Gsm.Details[key] diff --git a/commons/sharding/scommon.go b/commons/sharding/scommon.go index 99987661..3b3f1b04 100644 --- a/commons/sharding/scommon.go +++ b/commons/sharding/scommon.go @@ -44,8 +44,7 @@ import ( "fmt" "slices" - databasealphav1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" "regexp" "strconv" @@ -103,7 +102,7 @@ const ( ) // Function to build the env var specification -func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []databasealphav1.EnvironmentVariable, name string, restype string, masterFlag bool, directorParams string) []corev1.EnvVar { +func buildEnvVarsSpec(instance *databasev4.ShardingDatabase, variables []databasev4.EnvironmentVariable, name string, restype string, masterFlag bool, directorParams string) []corev1.EnvVar { var result []corev1.EnvVar var varinfo string var sidFlag bool = false @@ -295,7 +294,7 @@ func buildEnvVarsSpec(instance *databasealphav1.ShardingDatabase, variables []da } if instance.Spec.InvitedNodeSubnetFlag == "" { - instance.Spec.InvitedNodeSubnetFlag = "FALSE" + instance.Spec.InvitedNodeSubnetFlag = "TRUE" } if strings.ToUpper(instance.Spec.InvitedNodeSubnetFlag) != "FALSE" { @@ -351,7 +350,7 @@ func buildEnvVarsSpec(instance 
*databasealphav1.ShardingDatabase, variables []da } // FUnction to build the svc definition for catalog/shard and GSM -func buildSvcPortsDef(instance *databasealphav1.ShardingDatabase, resType string) []corev1.ServicePort { +func buildSvcPortsDef(instance *databasev4.ShardingDatabase, resType string) []corev1.ServicePort { var result []corev1.ServicePort if len(instance.Spec.PortMappings) > 0 { for _, portMapping := range instance.Spec.PortMappings { @@ -393,12 +392,12 @@ func generateName(base string) string { } // Function to generate the port mapping -func generatePortMapping(portMapping databasealphav1.PortMapping) string { +func generatePortMapping(portMapping databasev4.PortMapping) string { return generateName(fmt.Sprintf("%s-%d-%d-", "tcp", portMapping.Port, portMapping.TargetPort)) } -func LogMessages(msgtype string, msg string, err error, instance *databasealphav1.ShardingDatabase, logger logr.Logger) { +func LogMessages(msgtype string, msg string, err error, instance *databasev4.ShardingDatabase, logger logr.Logger) { // setting logrus formatter //logrus.SetFormatter(&logrus.JSONFormatter{}) //logrus.SetOutput(os.Stdout) @@ -411,6 +410,8 @@ func LogMessages(msgtype string, msg string, err error, instance *databasealphav } } else if msgtype == "INFO" { logger.Info(msg) + } else if msgtype == "Error" { + logger.Error(err, msg) } } @@ -419,7 +420,7 @@ func GetGsmPodName(gsmName string) string { return podName } -func GetSidName(variables []databasealphav1.EnvironmentVariable, name string) string { +func GetSidName(variables []databasev4.EnvironmentVariable, name string) string { var result string for _, variable := range variables { @@ -433,7 +434,7 @@ func GetSidName(variables []databasealphav1.EnvironmentVariable, name string) st return result } -func GetPdbName(variables []databasealphav1.EnvironmentVariable, name string) string { +func GetPdbName(variables []databasev4.EnvironmentVariable, name string) string { var result string for _, variable := range 
variables { @@ -447,34 +448,34 @@ func GetPdbName(variables []databasealphav1.EnvironmentVariable, name string) st return result } -func getlabelsForGsm(instance *databasealphav1.ShardingDatabase) map[string]string { - return buildLabelsForGsm(instance, "sharding") +func getlabelsForGsm(instance *databasev4.ShardingDatabase) map[string]string { + return buildLabelsForGsm(instance, "sharding", "gsm") } -func getlabelsForShard(instance *databasealphav1.ShardingDatabase) map[string]string { - return buildLabelsForShard(instance, "sharding") +func getlabelsForShard(instance *databasev4.ShardingDatabase) map[string]string { + return buildLabelsForShard(instance, "sharding", "shard") } -func getlabelsForCatalog(instance *databasealphav1.ShardingDatabase) map[string]string { - return buildLabelsForCatalog(instance, "sharding") +func getlabelsForCatalog(instance *databasev4.ShardingDatabase) map[string]string { + return buildLabelsForCatalog(instance, "sharding", "catalog") } -func LabelsForProvShardKind(instance *databasealphav1.ShardingDatabase, sftype string, +func LabelsForProvShardKind(instance *databasev4.ShardingDatabase, sftype string, ) map[string]string { if sftype == "shard" { - return buildLabelsForShard(instance, "sharding") + return buildLabelsForShard(instance, "sharding", "shard") } return nil } -func CheckSfset(sfsetName string, instance *databasealphav1.ShardingDatabase, kClient client.Client) (*appsv1.StatefulSet, error) { +func CheckSfset(sfsetName string, instance *databasev4.ShardingDatabase, kClient client.Client) (*appsv1.StatefulSet, error) { sfSetFound := &appsv1.StatefulSet{} err := kClient.Get(context.TODO(), types.NamespacedName{ Name: sfsetName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, sfSetFound) if err != nil { return sfSetFound, err @@ -482,11 +483,11 @@ func CheckSfset(sfsetName string, instance *databasealphav1.ShardingDatabase, kC return sfSetFound, nil } -func checkPvc(pvcName string, instance 
*databasealphav1.ShardingDatabase, kClient client.Client) (*corev1.PersistentVolumeClaim, error) { +func checkPvc(pvcName string, instance *databasev4.ShardingDatabase, kClient client.Client) (*corev1.PersistentVolumeClaim, error) { pvcFound := &corev1.PersistentVolumeClaim{} err := kClient.Get(context.TODO(), types.NamespacedName{ Name: pvcName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, pvcFound) if err != nil { return pvcFound, err @@ -494,7 +495,7 @@ func checkPvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClien return pvcFound, nil } -func DelPvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger) error { +func DelPvc(pvcName string, instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger) error { LogMessages("DEBUG", "Inside the delPvc and received param: "+GetFmtStr(pvcName), nil, instance, logger) pvcFound, err := checkPvc(pvcName, instance, kClient) @@ -510,7 +511,7 @@ func DelPvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClient return nil } -func DelSvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger) error { +func DelSvc(pvcName string, instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger) error { LogMessages("DEBUG", "Inside the delPvc and received param: "+GetFmtStr(pvcName), nil, instance, logger) pvcFound, err := checkPvc(pvcName, instance, kClient) @@ -526,11 +527,11 @@ func DelSvc(pvcName string, instance *databasealphav1.ShardingDatabase, kClient return nil } -func CheckSvc(svcName string, instance *databasealphav1.ShardingDatabase, kClient client.Client) (*corev1.Service, error) { +func CheckSvc(svcName string, instance *databasev4.ShardingDatabase, kClient client.Client) (*corev1.Service, error) { svcFound := &corev1.Service{} err := kClient.Get(context.TODO(), types.NamespacedName{ Name: svcName, - Namespace: 
instance.Spec.Namespace, + Namespace: instance.Namespace, }, svcFound) if err != nil { return svcFound, err @@ -538,7 +539,7 @@ func CheckSvc(svcName string, instance *databasealphav1.ShardingDatabase, kClien return svcFound, nil } -func PodListValidation(podList *corev1.PodList, sfName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, +func PodListValidation(podList *corev1.PodList, sfName string, instance *databasev4.ShardingDatabase, kClient client.Client, ) (bool, *corev1.Pod) { var isPodExist bool = false @@ -574,7 +575,7 @@ func PodListValidation(podList *corev1.PodList, sfName string, instance *databas return isPodExist, podInfo } -func GetPodList(sfsetName string, resType string, instance *databasealphav1.ShardingDatabase, kClient client.Client, +func GetPodList(sfsetName string, resType string, instance *databasev4.ShardingDatabase, kClient client.Client, ) (*corev1.PodList, error) { podList := &corev1.PodList{} //labelSelector := labels.SelectorFromSet(getlabelsForGsm(instance)) @@ -595,7 +596,7 @@ func GetPodList(sfsetName string, resType string, instance *databasealphav1.Shar return nil, err1 } - listOps := &client.ListOptions{Namespace: instance.Spec.Namespace, LabelSelector: labelSelector} + listOps := &client.ListOptions{Namespace: instance.Namespace, LabelSelector: labelSelector} err := kClient.List(context.TODO(), podList, listOps) if err != nil { @@ -604,11 +605,11 @@ func GetPodList(sfsetName string, resType string, instance *databasealphav1.Shar return podList, nil } -func checkPod(instance *databasealphav1.ShardingDatabase, pod *corev1.Pod, kClient client.Client, +func checkPod(instance *databasev4.ShardingDatabase, pod *corev1.Pod, kClient client.Client, ) error { err := kClient.Get(context.TODO(), types.NamespacedName{ Name: pod.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, pod) if err != nil { @@ -664,15 +665,15 @@ func checkContainerStatus(pod *corev1.Pod, kClient client.Client, // 
Namespace related function -func AddNamespace(instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger, +func AddNamespace(instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger, ) error { var msg string ns := &corev1.Namespace{} - err := kClient.Get(context.TODO(), types.NamespacedName{Name: instance.Spec.Namespace}, ns) + err := kClient.Get(context.TODO(), types.NamespacedName{Name: instance.Namespace}, ns) if err != nil { - //msg = "Namespace " + instance.Spec.Namespace + " doesn't exist! creating namespace" + //msg = "Namespace " + instance.Namespace + " doesn't exist! creating namespace" if errors.IsNotFound(err) { - err = kClient.Create(context.TODO(), NewNamespace(instance.Spec.Namespace)) + err = kClient.Create(context.TODO(), NewNamespace(instance.Namespace)) if err != nil { msg = "Error in creating namespace!" LogMessages("Error", msg, nil, instance, logger) @@ -700,7 +701,7 @@ func NewNamespace(name string) *corev1.Namespace { } } -func getOwnerRef(instance *databasealphav1.ShardingDatabase, +func getOwnerRef(instance *databasev4.ShardingDatabase, ) []metav1.OwnerReference { var ownerRef []metav1.OwnerReference @@ -708,8 +709,8 @@ func getOwnerRef(instance *databasealphav1.ShardingDatabase, return ownerRef } -func buildCatalogParams(instance *databasealphav1.ShardingDatabase) string { - var variables []databasealphav1.EnvironmentVariable = instance.Spec.Catalog[0].EnvVars +func buildCatalogParams(instance *databasev4.ShardingDatabase) string { + var variables []databasev4.EnvironmentVariable = instance.Spec.Catalog[0].EnvVars var result string var varinfo string var sidFlag bool = false @@ -858,8 +859,8 @@ func buildCatalogParams(instance *databasealphav1.ShardingDatabase) string { return result } -func buildDirectorParams(instance *databasealphav1.ShardingDatabase, oraGsmSpex databasealphav1.GsmSpec, idx int) string { - var variables []databasealphav1.EnvironmentVariable +func 
buildDirectorParams(instance *databasev4.ShardingDatabase, oraGsmSpex databasev4.GsmSpec, idx int) string { + var variables []databasev4.EnvironmentVariable var result string var varinfo string var dnameFlag bool = false @@ -905,7 +906,7 @@ func buildDirectorParams(instance *databasealphav1.ShardingDatabase, oraGsmSpex return result } -func BuildShardParams(instance *databasealphav1.ShardingDatabase, sfSet *appsv1.StatefulSet, OraShardSpex databasev1alpha1.ShardSpec) string { +func BuildShardParams(instance *databasev4.ShardingDatabase, sfSet *appsv1.StatefulSet, OraShardSpex databasev4.ShardSpec) string { var variables []corev1.EnvVar = sfSet.Spec.Template.Spec.Containers[0].Env var result string var varinfo string @@ -1014,11 +1015,11 @@ func BuildShardParams(instance *databasealphav1.ShardingDatabase, sfSet *appsv1. return result } -func labelsForShardingDatabaseKind(instance *databasealphav1.ShardingDatabase, sftype string, +func labelsForShardingDatabaseKind(instance *databasev4.ShardingDatabase, sftype string, ) map[string]string { if sftype == "shard" { - return buildLabelsForShard(instance, "sharding") + return buildLabelsForShard(instance, "sharding", "shard") } return nil @@ -1199,7 +1200,7 @@ func GetFmtStr(pstr string, return "[" + pstr + "]" } -func ReadConfigMap(cmName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger, +func ReadConfigMap(cmName string, instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger, ) (string, string, string, string, string, string) { var region, fingerprint, user, tenancy, passphrase, str1, topicid, k, value string @@ -1210,7 +1211,7 @@ func ReadConfigMap(cmName string, instance *databasealphav1.ShardingDatabase, kC // Reding a config map err = kClient.Get(context.TODO(), types.NamespacedName{ Name: cmName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, cm) if err != nil { @@ -1253,7 +1254,7 @@ func ReadConfigMap(cmName string, 
instance *databasealphav1.ShardingDatabase, kC return region, user, tenancy, passphrase, fingerprint, topicid } -func ReadSecret(secName string, instance *databasealphav1.ShardingDatabase, kClient client.Client, logger logr.Logger, +func ReadSecret(secName string, instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger, ) string { var value string @@ -1263,7 +1264,7 @@ func ReadSecret(secName string, instance *databasealphav1.ShardingDatabase, kCli // Reading a Secret var err error = kClient.Get(context.TODO(), types.NamespacedName{ Name: secName, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, sc) if err != nil { @@ -1285,20 +1286,17 @@ func GetK8sClientConfig(kClient client.Client) (clientcmd.ClientConfig, kubernet var kubeConfig clientcmd.ClientConfig var kubeClient kubernetes.Interface - databasealphav1.KubeConfigOnce.Do(func() { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - configOverrides := &clientcmd.ConfigOverrides{} - kubeConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - config, err := kubeConfig.ClientConfig() - if err != nil { - err1 = err - } - kubeClient, err = kubernetes.NewForConfig(config) - if err != nil { - err1 = err - } - - }) + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + configOverrides := &clientcmd.ConfigOverrides{} + kubeConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) + config, err := kubeConfig.ClientConfig() + if err != nil { + err1 = err + } + kubeClient, err = kubernetes.NewForConfig(config) + if err != nil { + err1 = err + } return kubeConfig, kubeClient, err1 } @@ -1312,7 +1310,7 @@ func Contains(list []string, s string) bool { } // Function to check shadrd in GSM -func CheckShardInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, 
+func CheckShardInGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { _, _, err := ExecCommand(gsmPodName, getShardCheckCmd(sparams), kubeClient, kubeconfig, instance, logger) @@ -1325,7 +1323,7 @@ func CheckShardInGsm(gsmPodName string, sparams string, instance *databasealphav } // Function to check the online Shard -func CheckOnlineShardInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func CheckOnlineShardInGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { _, _, err := ExecCommand(gsmPodName, getOnlineShardCmd(sparams), kubeClient, kubeconfig, instance, logger) @@ -1338,7 +1336,7 @@ func CheckOnlineShardInGsm(gsmPodName string, sparams string, instance *database } // Function to move the chunks -func MoveChunks(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func MoveChunks(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { _, _, err := ExecCommand(gsmPodName, getMoveChunksCmd(sparams), kubeClient, kubeconfig, instance, logger) @@ -1351,7 +1349,7 @@ func MoveChunks(gsmPodName string, sparams string, instance *databasealphav1.Sha } // Function to verify the chunks -func VerifyChunks(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func VerifyChunks(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient 
kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { _, _, err := ExecCommand(gsmPodName, getNoChunksCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { @@ -1363,7 +1361,7 @@ func VerifyChunks(gsmPodName string, sparams string, instance *databasealphav1.S } // Function to verify the chunks -func AddShardInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func AddShardInGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { _, _, err := ExecCommand(gsmPodName, getShardAddCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { @@ -1375,7 +1373,7 @@ func AddShardInGsm(gsmPodName string, sparams string, instance *databasealphav1. } // Function to deploy the Shards -func DeployShardInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func DeployShardInGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { _, _, err := ExecCommand(gsmPodName, getdeployShardCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { @@ -1387,7 +1385,7 @@ func DeployShardInGsm(gsmPodName string, sparams string, instance *databasealpha } // Function to verify the chunks -func CancelChunksInGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func CancelChunksInGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) 
error { _, _, err := ExecCommand(gsmPodName, getCancelChunksCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { @@ -1399,7 +1397,7 @@ func CancelChunksInGsm(gsmPodName string, sparams string, instance *databasealph } // Function to delete the shard -func RemoveShardFromGsm(gsmPodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func RemoveShardFromGsm(gsmPodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) error { _, _, err := ExecCommand(gsmPodName, getShardDelCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { @@ -1410,7 +1408,7 @@ func RemoveShardFromGsm(gsmPodName string, sparams string, instance *databasealp return nil } -func GetSvcIp(PodName string, sparams string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func GetSvcIp(PodName string, sparams string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) (string, string, error) { stdoutput, stderror, err := ExecCommand(PodName, GetIpCmd(sparams), kubeClient, kubeconfig, instance, logger) if err != nil { @@ -1421,7 +1419,7 @@ func GetSvcIp(PodName string, sparams string, instance *databasealphav1.Sharding return strings.Replace(stdoutput, "\r\n", "", -1), strings.Replace(stderror, "/r/n", "", -1), nil } -func GetGsmServices(PodName string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func GetGsmServices(PodName string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) string { stdoutput, _, err := ExecCommand(PodName, 
getGsmSvcCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { @@ -1432,7 +1430,7 @@ func GetGsmServices(PodName string, instance *databasealphav1.ShardingDatabase, return stdoutput } -func GetDbRole(PodName string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func GetDbRole(PodName string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) string { stdoutput, _, err := ExecCommand(PodName, getDbRoleCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { @@ -1443,7 +1441,7 @@ func GetDbRole(PodName string, instance *databasealphav1.ShardingDatabase, kubeC return strings.TrimSpace(stdoutput) } -func GetDbOpenMode(PodName string, instance *databasealphav1.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, +func GetDbOpenMode(PodName string, instance *databasev4.ShardingDatabase, kubeClient kubernetes.Interface, kubeconfig clientcmd.ClientConfig, logger logr.Logger, ) string { stdoutput, _, err := ExecCommand(PodName, getDbModeCmd(), kubeClient, kubeconfig, instance, logger) if err != nil { @@ -1454,7 +1452,7 @@ func GetDbOpenMode(PodName string, instance *databasealphav1.ShardingDatabase, k return strings.TrimSpace(stdoutput) } -func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, instance *databasealphav1.ShardingDatabase, kClient client.Client, +func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, instance *databasev4.ShardingDatabase, kClient client.Client, ) error { //var msg string @@ -1462,7 +1460,7 @@ func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, insta var err error sfsetCopy := sfSetFound.DeepCopy() - sfsetCopy.Labels[string(databasealphav1.ShardingDelLabelKey)] = string(databasealphav1.ShardingDelLabelTrueValue) + 
sfsetCopy.Labels[string(databasev4.ShardingDelLabelKey)] = string(databasev4.ShardingDelLabelTrueValue) patch := client.MergeFrom(sfSetFound) err = kClient.Patch(context.Background(), sfsetCopy, patch) if err != nil { @@ -1470,7 +1468,7 @@ func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, insta } podCopy := sfSetPod.DeepCopy() - podCopy.Labels[string(databasealphav1.ShardingDelLabelKey)] = string(databasealphav1.ShardingDelLabelTrueValue) + podCopy.Labels[string(databasev4.ShardingDelLabelKey)] = string(databasev4.ShardingDelLabelTrueValue) podPatch := client.MergeFrom(sfSetPod.DeepCopy()) err = kClient.Patch(context.Background(), podCopy, podPatch) if err != nil { @@ -1480,14 +1478,14 @@ func SfsetLabelPatch(sfSetFound *appsv1.StatefulSet, sfSetPod *corev1.Pod, insta return nil } -func InstanceShardPatch(obj client.Object, instance *databasealphav1.ShardingDatabase, kClient client.Client, id int32, field string, value string, +func InstanceShardPatch(obj client.Object, instance *databasev4.ShardingDatabase, kClient client.Client, id int32, field string, value string, ) error { var err error instSpec := instance.Spec instSpec.Shard[id].IsDelete = "failed" instshardM, _ := json.Marshal(struct { - Spec *databasealphav1.ShardingDatabaseSpec `json:"spec":` + Spec *databasev4.ShardingDatabaseSpec `json:"spec":` }{ Spec: &instSpec, }) @@ -1504,7 +1502,7 @@ func InstanceShardPatch(obj client.Object, instance *databasealphav1.ShardingDat // Send Notification -func SendNotification(title string, body string, instance *databasealphav1.ShardingDatabase, topicId string, rclient ons.NotificationDataPlaneClient, logger logr.Logger, +func SendNotification(title string, body string, instance *databasev4.ShardingDatabase, topicId string, rclient ons.NotificationDataPlaneClient, logger logr.Logger, ) { var msg string req := ons.PublishMessageRequest{TopicId: common.String(topicId), @@ -1525,14 +1523,14 @@ func GetSecretMount() string { return oraSecretMount 
} -func checkTdeWalletFlag(instance *databasev1alpha1.ShardingDatabase) bool { +func checkTdeWalletFlag(instance *databasev4.ShardingDatabase) bool { if strings.ToLower(instance.Spec.IsTdeWallet) == "enable" { return true } return false } -func CheckIsDeleteFlag(delStr string, instance *databasealphav1.ShardingDatabase, logger logr.Logger) bool { +func CheckIsDeleteFlag(delStr string, instance *databasev4.ShardingDatabase, logger logr.Logger) bool { if strings.ToLower(delStr) == "enable" { return true } @@ -1542,7 +1540,7 @@ func CheckIsDeleteFlag(delStr string, instance *databasealphav1.ShardingDatabase return false } -func getTdeWalletMountLoc(instance *databasev1alpha1.ShardingDatabase) string { +func getTdeWalletMountLoc(instance *databasev4.ShardingDatabase) string { if len(instance.Spec.TdeWalletPvcMountLocation) > 0 { return instance.Spec.TdeWalletPvcMountLocation } diff --git a/commons/sharding/shard.go b/commons/sharding/shard.go index c76fc0e5..e48b56dd 100644 --- a/commons/sharding/shard.go +++ b/commons/sharding/shard.go @@ -43,7 +43,7 @@ import ( "reflect" "strconv" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" @@ -54,7 +54,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func buildLabelsForShard(instance *databasev1alpha1.ShardingDatabase, label string) map[string]string { +func buildLabelsForShard(instance *databasev4.ShardingDatabase, label string, shardName string) map[string]string { return map[string]string{ "app": "OracleSharding", "type": "Shard", @@ -62,7 +62,7 @@ func buildLabelsForShard(instance *databasev1alpha1.ShardingDatabase, label stri } } -func getLabelForShard(instance *databasev1alpha1.ShardingDatabase) string { +func getLabelForShard(instance *databasev4.ShardingDatabase) string { // if len(OraShardSpex.Label) !=0 { // return OraShardSpex.Label @@ -71,7 
+71,7 @@ func getLabelForShard(instance *databasev1alpha1.ShardingDatabase) string { return instance.Name } -func BuildStatefulSetForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) *appsv1.StatefulSet { +func BuildStatefulSetForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) *appsv1.StatefulSet { sfset := &appsv1.StatefulSet{ TypeMeta: buildTypeMetaForShard(), ObjectMeta: builObjectMetaForShard(instance, OraShardSpex), @@ -92,29 +92,29 @@ func buildTypeMetaForShard() metav1.TypeMeta { } // Function to build ObjectMeta -func builObjectMetaForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) metav1.ObjectMeta { +func builObjectMetaForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) metav1.ObjectMeta { // building objectMeta objmeta := metav1.ObjectMeta{ Name: OraShardSpex.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForShard(instance, "sharding"), + Labels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), } return objmeta } // Function to build Stateful Specs -func buildStatefulSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) *appsv1.StatefulSetSpec { +func buildStatefulSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) *appsv1.StatefulSetSpec { // building Stateful set Specs var size int32 = 1 sfsetspec := &appsv1.StatefulSetSpec{ ServiceName: OraShardSpex.Name, Selector: &metav1.LabelSelector{ - MatchLabels: buildLabelsForShard(instance, "sharding"), + MatchLabels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: buildLabelsForShard(instance, "sharding"), + Labels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), }, Spec: 
*buildPodSpecForShard(instance, OraShardSpex), }, @@ -132,7 +132,7 @@ func buildStatefulSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraS // Function to build PodSpec -func buildPodSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) *corev1.PodSpec { +func buildPodSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) *corev1.PodSpec { user := oraRunAsUser group := oraFsGroup @@ -168,7 +168,7 @@ func buildPodSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardS } // Function to build Volume Spec -func buildVolumeSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.Volume { +func buildVolumeSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.Volume { var result []corev1.Volume result = []corev1.Volume{ { @@ -208,7 +208,7 @@ func buildVolumeSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraSha } // Function to build the container Specification -func buildContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.Container { +func buildContainerSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.Container { // building Continer spec var result []corev1.Container containerSpec := corev1.Container{ @@ -287,7 +287,7 @@ func buildContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, Ora } // Function to build the init Container Spec -func buildInitContainerSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.Container { +func buildInitContainerSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.Container { var result []corev1.Container privFlag := false var uid int64 = 0 @@ -325,7 +325,7 @@ func buildInitContainerSpecForShard(instance 
*databasev1alpha1.ShardingDatabase, return result } -func buildVolumeMountSpecForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.VolumeMount { +func buildVolumeMountSpecForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.VolumeMount { var result []corev1.VolumeMount result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "secretmap-vol3", MountPath: oraSecretMount, ReadOnly: true}) result = append(result, corev1.VolumeMount{Name: OraShardSpex.Name + "-oradata-vol4", MountPath: oraDataMount}) @@ -351,7 +351,7 @@ func buildVolumeMountSpecForShard(instance *databasev1alpha1.ShardingDatabase, O return result } -func volumeClaimTemplatesForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) []corev1.PersistentVolumeClaim { +func volumeClaimTemplatesForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) []corev1.PersistentVolumeClaim { var claims []corev1.PersistentVolumeClaim @@ -363,9 +363,9 @@ func volumeClaimTemplatesForShard(instance *databasev1alpha1.ShardingDatabase, O { ObjectMeta: metav1.ObjectMeta{ Name: OraShardSpex.Name + "-oradata-vol4", - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, OwnerReferences: getOwnerRef(instance), - Labels: buildLabelsForShard(instance, "sharding"), + Labels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ @@ -395,7 +395,7 @@ func volumeClaimTemplatesForShard(instance *databasev1alpha1.ShardingDatabase, O return claims } -func BuildServiceDefForShard(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraShardSpex databasev1alpha1.ShardSpec, svctype string) *corev1.Service { +func BuildServiceDefForShard(instance *databasev4.ShardingDatabase, replicaCount int32, OraShardSpex databasev4.ShardSpec, svctype string) 
*corev1.Service { //service := &corev1.Service{} service := &corev1.Service{ ObjectMeta: buildSvcObjectMetaForShard(instance, replicaCount, OraShardSpex, svctype), @@ -410,7 +410,7 @@ func BuildServiceDefForShard(instance *databasev1alpha1.ShardingDatabase, replic if svctype == "local" { service.Spec.ClusterIP = corev1.ClusterIPNone - service.Spec.Selector = buildLabelsForShard(instance, "sharding") + service.Spec.Selector = getSvcLabelsForShard(replicaCount, OraShardSpex) } // build Service Ports Specs to be exposed. If the PortMappings is not set then default ports will be exposed. @@ -419,7 +419,7 @@ func BuildServiceDefForShard(instance *databasev1alpha1.ShardingDatabase, replic } // Function to build Service ObjectMeta -func buildSvcObjectMetaForShard(instance *databasev1alpha1.ShardingDatabase, replicaCount int32, OraShardSpex databasev1alpha1.ShardSpec, svctype string) metav1.ObjectMeta { +func buildSvcObjectMetaForShard(instance *databasev4.ShardingDatabase, replicaCount int32, OraShardSpex databasev4.ShardSpec, svctype string) metav1.ObjectMeta { // building objectMeta var svcName string @@ -434,14 +434,14 @@ func buildSvcObjectMetaForShard(instance *databasev1alpha1.ShardingDatabase, rep objmeta := metav1.ObjectMeta{ Name: svcName, - Namespace: instance.Spec.Namespace, - Labels: buildLabelsForShard(instance, "sharding"), + Namespace: instance.Namespace, + Labels: buildLabelsForShard(instance, "sharding", OraShardSpex.Name), OwnerReferences: getOwnerRef(instance), } return objmeta } -func getSvcLabelsForShard(replicaCount int32, OraShardSpex databasev1alpha1.ShardSpec) map[string]string { +func getSvcLabelsForShard(replicaCount int32, OraShardSpex databasev4.ShardSpec) map[string]string { var labelStr map[string]string = make(map[string]string) if replicaCount == -1 { @@ -455,7 +455,7 @@ func getSvcLabelsForShard(replicaCount int32, OraShardSpex databasev1alpha1.Shar } // ======================== Update Section ======================== -func 
UpdateProvForShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec, kClient client.Client, sfSet *appsv1.StatefulSet, shardPod *corev1.Pod, logger logr.Logger, +func UpdateProvForShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec, kClient client.Client, sfSet *appsv1.StatefulSet, shardPod *corev1.Pod, logger logr.Logger, ) (ctrl.Result, error) { var msg string var size int32 = 1 diff --git a/config/crd/bases/database.oracle.com_autonomouscontainerdatabases.yaml b/config/crd/bases/database.oracle.com_autonomouscontainerdatabases.yaml index bac3a28c..1e078b63 100644 --- a/config/crd/bases/database.oracle.com_autonomouscontainerdatabases.yaml +++ b/config/crd/bases/database.oracle.com_autonomouscontainerdatabases.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: autonomouscontainerdatabases.database.oracle.com spec: group: database.oracle.com @@ -32,24 +30,14 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases - API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: AutonomousContainerDatabaseSpec defines the desired state - of AutonomousContainerDatabase properties: action: enum: @@ -58,8 +46,6 @@ spec: - TERMINATE type: string autonomousContainerDatabaseOCID: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - Important: Run "make" to regenerate code after modifying this file' type: string autonomousExadataVMClusterOCID: type: string @@ -75,7 +61,6 @@ spec: default: false type: boolean ociConfig: - description: "*********************** *\tOCI config ***********************" properties: configMapName: type: string @@ -83,21 +68,84 @@ spec: type: string type: object patchModel: - description: 'AutonomousContainerDatabasePatchModelEnum Enum with - underlying type: string' enum: - RELEASE_UPDATES - RELEASE_UPDATE_REVISIONS type: string type: object status: - description: AutonomousContainerDatabaseStatus defines the observed state - of AutonomousContainerDatabase properties: lifecycleState: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' + type: string + timeCreated: + type: string + required: + - lifecycleState + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: DisplayName + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - SYNC + - RESTART + - TERMINATE + type: string + autonomousContainerDatabaseOCID: + type: string + autonomousExadataVMClusterOCID: + type: string 
+ compartmentOCID: + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + patchModel: + enum: + - RELEASE_UPDATES + - RELEASE_UPDATE_REVISIONS + type: string + type: object + status: + properties: + lifecycleState: type: string timeCreated: type: string @@ -109,9 +157,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_autonomousdatabasebackups.yaml b/config/crd/bases/database.oracle.com_autonomousdatabasebackups.yaml index a5c37507..b0d6f8ed 100644 --- a/config/crd/bases/database.oracle.com_autonomousdatabasebackups.yaml +++ b/config/crd/bases/database.oracle.com_autonomousdatabasebackups.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: autonomousdatabasebackups.database.oracle.com spec: group: database.oracle.com @@ -38,24 +36,14 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousDatabaseBackup is the Schema for the autonomousdatabasebackups - API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: AutonomousDatabaseBackupSpec defines the desired state of - AutonomousDatabaseBackup properties: autonomousDatabaseBackupOCID: type: string @@ -64,7 +52,6 @@ spec: isLongTermBackup: type: boolean ociConfig: - description: "*********************** *\tOCI config ***********************" properties: configMapName: type: string @@ -74,11 +61,8 @@ spec: retentionPeriodInDays: type: integer target: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - Important: Run "make" to regenerate code after modifying this file' properties: k8sADB: - description: "*********************** *\tADB spec ***********************" properties: name: type: string @@ -91,8 +75,6 @@ spec: type: object type: object status: - description: AutonomousDatabaseBackupStatus defines the observed state - of AutonomousDatabaseBackup properties: autonomousDatabaseOCID: type: string @@ -105,16 +87,103 @@ spec: isAutomatic: type: boolean lifecycleState: - description: 'AutonomousDatabaseBackupLifecycleStateEnum Enum with - underlying type: string' type: string timeEnded: type: string timeStarted: type: string type: - description: 'AutonomousDatabaseBackupTypeEnum Enum with underlying - type: string' + type: string + required: + - autonomousDatabaseOCID + - compartmentOCID + - dbDisplayName + - dbName + - isAutomatic + - lifecycleState + - type + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.dbDisplayName + name: DB DisplayName + type: string + - jsonPath: .status.type + name: Type + type: string + - jsonPath: .status.timeStarted + name: Started + type: string + - jsonPath: .status.timeEnded + name: Ended + type: string + name: v4 + schema: + openAPIV3Schema: + 
properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + autonomousDatabaseBackupOCID: + type: string + displayName: + type: string + isLongTermBackup: + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + retentionPeriodInDays: + type: integer + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + type: object + status: + properties: + autonomousDatabaseOCID: + type: string + compartmentOCID: + type: string + dbDisplayName: + type: string + dbName: + type: string + isAutomatic: + type: boolean + lifecycleState: + type: string + timeEnded: + type: string + timeStarted: + type: string + type: type: string required: - autonomousDatabaseOCID @@ -130,9 +199,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_autonomousdatabaserestores.yaml b/config/crd/bases/database.oracle.com_autonomousdatabaserestores.yaml index 5e9f2c73..3bfc5a4e 100644 --- a/config/crd/bases/database.oracle.com_autonomousdatabaserestores.yaml +++ b/config/crd/bases/database.oracle.com_autonomousdatabaserestores.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: autonomousdatabaserestores.database.oracle.com spec: group: database.oracle.com @@ -32,27 +30,16 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousDatabaseRestore is the Schema for the autonomousdatabaserestores - API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: AutonomousDatabaseRestoreSpec defines the desired state of - AutonomousDatabaseRestore properties: ociConfig: - description: "*********************** *\tOCI config ***********************" properties: configMapName: type: string @@ -62,9 +49,6 @@ spec: source: properties: k8sADBBackup: - description: 'EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO - OWN! NOTE: json tags are required. Any new fields you add must - have json tags for the fields to be serialized.' 
properties: name: type: string @@ -72,17 +56,12 @@ spec: pointInTime: properties: timestamp: - description: 'The timestamp must follow this format: YYYY-MM-DD - HH:MM:SS GMT' type: string type: object type: object target: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - Important: Run "make" to regenerate code after modifying this file' properties: k8sADB: - description: "*********************** *\tADB spec ***********************" properties: name: type: string @@ -98,18 +77,98 @@ spec: - target type: object status: - description: AutonomousDatabaseRestoreStatus defines the observed state - of AutonomousDatabaseRestore properties: dbName: type: string displayName: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' type: string status: - description: 'WorkRequestStatusEnum Enum with underlying type: string' + type: string + timeAccepted: + type: string + timeEnded: + type: string + timeStarted: + type: string + workRequestOCID: + type: string + required: + - dbName + - displayName + - status + - workRequestOCID + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.displayName + name: DbDisplayName + type: string + - jsonPath: .status.dbName + name: DbName + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + source: + properties: + k8sADBBackup: + properties: + name: + type: string + type: object + pointInTime: + properties: + timestamp: + type: string + type: object + type: object + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: 
+ properties: + ocid: + type: string + type: object + type: object + required: + - source + - target + type: object + status: + properties: + dbName: + type: string + displayName: + type: string + status: type: string timeAccepted: type: string @@ -130,9 +189,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_autonomousdatabases.yaml b/config/crd/bases/database.oracle.com_autonomousdatabases.yaml index f77407f3..1672ae81 100644 --- a/config/crd/bases/database.oracle.com_autonomousdatabases.yaml +++ b/config/crd/bases/database.oracle.com_autonomousdatabases.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: autonomousdatabases.database.oracle.com spec: group: database.oracle.com @@ -47,63 +45,400 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousDatabase is the Schema for the autonomousdatabases - API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: 'AutonomousDatabaseSpec defines the desired state of AutonomousDatabase - Important: Run "make" to regenerate code after modifying this file' properties: + action: + enum: + - "" + - Create + - Sync + - Update + - Stop + - Start + - Terminate + - Clone + type: string + clone: + properties: + adminPassword: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + cloneType: + enum: + - FULL + - METADATA + type: string + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array + type: object details: - description: AutonomousDatabaseDetails defines the detail information - of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase properties: adminPassword: properties: k8sSecret: - description: 
"*********************** *\tSecret specs ***********************" properties: name: type: string type: object ociSecret: properties: - ocid: + id: + type: string + type: object + type: object + autonomousContainerDatabase: + properties: + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + id: + type: string + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + wallet: + properties: + name: + type: string + password: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + type: object + required: + - action + type: object + status: + properties: + allConnectionStrings: + items: + properties: + connectionStrings: + items: + properties: + connectionString: + type: string + tnsName: + type: string + type: object + type: array + tlsAuthentication: + type: string + required: + - connectionStrings + type: 
object + type: array + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lifecycleState: + type: string + timeCreated: + type: string + walletExpiringDate: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.details.displayName + name: Display Name + type: string + - jsonPath: .spec.details.dbName + name: Db Name + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .spec.details.isDedicated + name: Dedicated + type: string + - jsonPath: .spec.details.cpuCoreCount + name: OCPUs + type: integer + - jsonPath: .spec.details.dataStorageSizeInTBs + name: Storage (TB) + type: integer + - jsonPath: .spec.details.dbWorkload + name: Workload Type + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - "" + - Create + - Sync + - Update + - Stop + - Start + - Terminate + - Clone + type: string + clone: + properties: + adminPassword: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: type: string type: object type: 
object autonomousContainerDatabase: - description: ACDSpec defines the spec of the target for backup/restore - runs. The name could be the name of an AutonomousDatabase or - an AutonomousDatabaseBackup properties: - k8sACD: - description: "*********************** *\tACD specs ***********************" + k8sAcd: properties: name: type: string type: object - ociACD: + ociAcd: properties: - ocid: + id: type: string type: object type: object - autonomousDatabaseOCID: + cloneType: + enum: + - FULL + - METADATA type: string - compartmentOCID: + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU type: string cpuCoreCount: type: integer @@ -114,8 +449,6 @@ spec: dbVersion: type: string dbWorkload: - description: 'AutonomousDatabaseDbWorkloadEnum Enum with underlying - type: string' enum: - OLTP - DW @@ -128,87 +461,158 @@ spec: additionalProperties: type: string type: object + isAccessControlEnabled: + type: boolean isAutoScalingEnabled: type: boolean isDedicated: type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean licenseModel: - description: 'AutonomousDatabaseLicenseModelEnum Enum with underlying - type: string' enum: - LICENSE_INCLUDED - BRING_YOUR_OWN_LICENSE type: string - lifecycleState: - description: 'AutonomousDatabaseLifecycleStateEnum Enum with underlying - type: string' + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: type: string - networkAccess: + whitelistedIps: + items: + type: string + type: array + type: object + details: + properties: + adminPassword: properties: - accessControlList: - items: - type: string - type: array - accessType: - enum: - - "" - - PUBLIC - - RESTRICTED - - PRIVATE - type: string - isAccessControlEnabled: - type: boolean - isMTLSConnectionRequired: - type: boolean - privateEndpoint: + k8sSecret: properties: - hostnamePrefix: + name: type: string - nsgOCIDs: - items: - 
type: string - type: array - subnetOCID: + type: object + ociSecret: + properties: + id: type: string type: object type: object - wallet: + autonomousContainerDatabase: properties: - name: - type: string - password: + k8sAcd: properties: - k8sSecret: - description: "*********************** *\tSecret specs - ***********************" - properties: - name: - type: string - type: object - ociSecret: - properties: - ocid: - type: string - type: object + name: + type: string type: object + ociAcd: + properties: + id: + type: string + type: object + type: object + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string type: object + id: + type: string + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array type: object hardLink: default: false type: boolean ociConfig: - description: "*********************** *\tOCI config ***********************" properties: configMapName: type: string secretName: type: string type: object + wallet: + properties: + name: + type: string + password: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + type: object required: - - details + - action type: object status: - description: 
AutonomousDatabaseStatus defines the observed state of AutonomousDatabase properties: allConnectionStrings: items: @@ -230,63 +634,29 @@ spec: type: array conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. \t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. 
The value should be a CamelCase string. - This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -302,9 +672,6 @@ spec: - type x-kubernetes-list-type: map lifecycleState: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' type: string timeCreated: type: string @@ -316,9 +683,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_cdbs.yaml b/config/crd/bases/database.oracle.com_cdbs.yaml index 6b1c350c..8ea594e6 100644 --- a/config/crd/bases/database.oracle.com_cdbs.yaml +++ b/config/crd/bases/database.oracle.com_cdbs.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: cdbs.database.oracle.com spec: group: database.oracle.com @@ -29,10 +27,6 @@ spec: jsonPath: .spec.dbPort name: DB Port type: integer - - description: ' string of the tnsalias' - jsonPath: .spec.dbTnsurl - name: TNS STRING - type: string - description: Replicas jsonPath: .spec.replicas name: 
Replicas @@ -45,31 +39,25 @@ spec: jsonPath: .status.msg name: Message type: string - name: v1alpha1 + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v4 schema: openAPIV3Schema: - description: CDB is the Schema for the cdbs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: CDBSpec defines the desired state of CDB properties: cdbAdminPwd: - description: Password for the CDB Administrator to manage PDB lifecycle properties: secret: - description: CDBSecret defines the secretName properties: key: type: string @@ -83,11 +71,8 @@ spec: - secret type: object cdbAdminUser: - description: User in the root container with sysdba priviledges to - manage PDB lifecycle properties: secret: - description: CDBSecret defines the secretName properties: key: type: string @@ -101,12 +86,40 @@ spec: - secret type: object cdbName: - description: Name of the CDB type: string + cdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object 
cdbTlsCrt: properties: secret: - description: CDBSecret defines the secretName properties: key: type: string @@ -122,7 +135,6 @@ spec: cdbTlsKey: properties: secret: - description: CDBSecret defines the secretName properties: key: type: string @@ -136,40 +148,31 @@ spec: - secret type: object dbPort: - description: DB server port type: integer dbServer: - description: Name of the DB server type: string dbTnsurl: type: string + deletePdbCascade: + type: boolean nodeSelector: additionalProperties: type: string - description: Node Selector for running the Pod type: object ordsImage: - description: ORDS Image Name type: string ordsImagePullPolicy: - description: ORDS Image Pull Policy enum: - Always - Never type: string ordsImagePullSecret: - description: The name of the image pull secret in case of a private - docker repository. type: string ordsPort: - description: ORDS server port. For now, keep it as 8888. TO BE USED - IN FUTURE RELEASE. type: integer ordsPwd: - description: Password for user ORDS_PUBLIC_USER properties: secret: - description: CDBSecret defines the secretName properties: key: type: string @@ -183,16 +186,12 @@ spec: - secret type: object replicas: - description: Number of ORDS Containers to create type: integer serviceName: - description: Name of the CDB Service type: string sysAdminPwd: - description: Password for the CDB System Administrator properties: secret: - description: CDBSecret defines the secretName properties: key: type: string @@ -206,10 +205,8 @@ spec: - secret type: object webServerPwd: - description: Password for the Web Server User properties: secret: - description: CDBSecret defines the secretName properties: key: type: string @@ -223,11 +220,8 @@ spec: - secret type: object webServerUser: - description: Web Server User with SQL Administrator role to allow - us to authenticate to the PDB Lifecycle Management REST endpoints properties: secret: - description: CDBSecret defines the secretName properties: key: type: string @@ -242,16 
+236,12 @@ spec: type: object type: object status: - description: CDBStatus defines the observed state of CDB properties: msg: - description: Message type: string phase: - description: Phase of the CDB Resource type: string status: - description: CDB Resource Status type: boolean required: - phase @@ -262,9 +252,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_dataguardbrokers.yaml b/config/crd/bases/database.oracle.com_dataguardbrokers.yaml index f19a3e22..5efceff4 100644 --- a/config/crd/bases/database.oracle.com_dataguardbrokers.yaml +++ b/config/crd/bases/database.oracle.com_dataguardbrokers.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: dataguardbrokers.database.oracle.com spec: group: database.oracle.com @@ -40,41 +38,120 @@ spec: - jsonPath: .status.status name: Status type: string + - jsonPath: .status.fastStartFailover + name: FSFO + type: string name: v1alpha1 schema: openAPIV3Schema: - description: DataguardBroker is the Schema for the dataguardbrokers API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: DataguardBrokerSpec defines the desired state of DataguardBroker properties: - fastStartFailOver: - properties: - enable: - type: boolean - strategy: - items: - description: FSFO strategy - properties: - sourceDatabaseRef: - type: string - targetDatabaseRefs: - type: string - type: object - type: array + fastStartFailover: + type: boolean + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + primaryDatabaseRef: + type: string + protectionMode: + enum: + - MaxPerformance + - MaxAvailability + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + setAsPrimaryDatabase: + type: string + standbyDatabaseRefs: + items: + type: string + type: array + required: + - primaryDatabaseRef + - protectionMode + - standbyDatabaseRefs + type: object + status: + properties: + clusterConnectString: + type: string + databasesInDataguardConfig: + additionalProperties: + type: string type: object + externalConnectString: + type: string + fastStartFailover: + type: boolean + primaryDatabase: + type: string + primaryDatabaseRef: + type: string + protectionMode: + type: string + standbyDatabases: + type: string + status: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.primaryDatabase + name: Primary + type: string + - jsonPath: .status.standbyDatabases + name: Standbys + type: string + - jsonPath: .spec.protectionMode + name: Protection Mode + type: string + - jsonPath: .status.clusterConnectString + name: Cluster Connect Str + priority: 1 + type: string + - jsonPath: .status.externalConnectString + name: Connect Str + type: string + - jsonPath: .spec.primaryDatabaseRef + name: Primary Database + priority: 1 + type: string + - 
jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.fastStartFailover + name: FSFO + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + fastStartFailover: + type: boolean loadBalancer: type: boolean nodeSelector: @@ -104,12 +181,17 @@ spec: - standbyDatabaseRefs type: object status: - description: DataguardBrokerStatus defines the observed state of DataguardBroker properties: clusterConnectString: type: string + databasesInDataguardConfig: + additionalProperties: + type: string + type: object externalConnectString: type: string + fastStartFailover: + type: boolean primaryDatabase: type: string primaryDatabaseRef: @@ -126,9 +208,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_dbcssystems.yaml b/config/crd/bases/database.oracle.com_dbcssystems.yaml index 3f4b1c46..468d7612 100644 --- a/config/crd/bases/database.oracle.com_dbcssystems.yaml +++ b/config/crd/bases/database.oracle.com_dbcssystems.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: dbcssystems.database.oracle.com spec: group: database.oracle.com @@ -19,23 +17,60 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: DbcsSystem is the Schema for the dbcssystems API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: DbcsSystemSpec defines the desired state of DbcsSystem properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - displayName + - hostName + - subnetId + type: object dbSystem: properties: availabilityDomain: @@ -51,7 +86,6 @@ spec: dbAdminPaswordSecret: type: string dbBackupConfig: - description: DB Backup COnfig Network Struct properties: autoBackupEnabled: type: boolean @@ -88,12 +122,394 @@ spec: type: string initialDataStorageSizeInGB: type: integer + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + 
additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPaswordSecret + - hostName + - shape + - subnetId + type: object + hardLink: + type: boolean + id: + type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + ociConfigMap: + type: string + ociSecret: + type: string + pdbConfigs: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + isDelete: + type: boolean + pdbAdminPassword: + type: string + pdbName: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + tdeWalletPassword: + type: string + required: + - freeformTags + - pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean + required: + - ociConfigMap + type: object + status: + properties: + availabilityDomain: + type: string + cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbCloneStatus: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object + dbEdition: + type: string + dbInfo: + items: + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: + type: string + id: + type: string + kmsDetailsStatus: + properties: + compartmentId: + 
type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: + type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + pdbDetailsStatus: + items: + properties: + pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + required: + - freeformTags + - pdbName + - shouldPdbAdminAccountBeLocked + type: object + type: array + type: object + type: array + recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + properties: + dbAdminPasswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + 
initialDataStorageSizeInGB: + type: integer kmsKeyId: type: string kmsKeyVersionId: type: string licenseModel: type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - displayName + - hostName + - subnetId + type: object + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: integer + dbAdminPasswordSecret: + type: string + dbBackupConfig: + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: + type: string + diskRedundancy: + type: string + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string nodeCount: type: integer pdbName: @@ -121,25 +537,65 @@ spec: required: - availabilityDomain - compartmentId - - dbAdminPaswordSecret + - dbAdminPasswordSecret - hostName - shape - - sshPublicKeys - subnetId type: object hardLink: type: boolean id: type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object ociConfigMap: type: string ociSecret: type: string + pdbConfigs: + items: + properties: + freeformTags: + 
additionalProperties: + type: string + type: object + isDelete: + type: boolean + pdbAdminPassword: + type: string + pdbName: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + tdeWalletPassword: + type: string + required: + - freeformTags + - pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean required: - ociConfigMap type: object status: - description: DbcsSystemStatus defines the observed state of DbcsSystem properties: availabilityDomain: type: string @@ -149,11 +605,38 @@ spec: type: integer dataStorageSizeInGBs: type: integer + dbCloneStatus: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object dbEdition: type: string dbInfo: items: - description: DbcsSystemStatus defines the observed state of DbcsSystem properties: dbHomeId: type: string @@ -171,6 +654,25 @@ spec: type: string id: type: string + kmsDetailsStatus: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: + type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object licenseModel: type: string network: @@ -192,6 +694,28 @@ spec: type: object nodeCount: type: integer + pdbDetailsStatus: + items: + properties: + pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + type: object + type: 
array + type: object + type: array recoStorageSizeInGB: type: integer shape: @@ -232,9 +756,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_lrests.yaml b/config/crd/bases/database.oracle.com_lrests.yaml new file mode 100644 index 00000000..c20356e7 --- /dev/null +++ b/config/crd/bases/database.oracle.com_lrests.yaml @@ -0,0 +1,254 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: lrests.database.oracle.com +spec: + group: database.oracle.com + names: + kind: LREST + listKind: LRESTList + plural: lrests + singular: lrest + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the LREST + jsonPath: .spec.cdbName + name: CDB NAME + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the LREST Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message if any + jsonPath: .status.msg + name: Message + type: string + - description: string of the tnsalias + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: 
object + required: + - secret + type: object + cdbName: + type: string + cdbPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + lrestImage: + type: string + lrestImagePullPolicy: + enum: + - Always + - Never + type: string + lrestImagePullSecret: + type: string + lrestPort: + type: integer + lrestPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + nodeSelector: + additionalProperties: + type: string + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + 
type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_lrpdbs.yaml b/config/crd/bases/database.oracle.com_lrpdbs.yaml new file mode 100644 index 00000000..14ad7f29 --- /dev/null +++ b/config/crd/bases/database.oracle.com_lrpdbs.yaml @@ -0,0 +1,369 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: lrpdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: LRPDB + listKind: LRPDBList + plural: lrpdbs + singular: lrpdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the LRPDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: last sqlcode + jsonPath: .status.sqlCode + name: last sqlcode + type: integer + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + - Alter + - Noaction + type: string + adminName: + properties: + secret: + properties: + key: + type: 
string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminpdbPass: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminpdbUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + alterSystem: + type: string + alterSystemParameter: + type: string + alterSystemValue: + type: string + asClone: + type: boolean + assertiveLrpdbDeletion: + type: boolean + cdbName: + type: string + cdbNamespace: + type: string + cdbPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + lrpdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + lrpdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + lrpdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - 
RESTRICTED + type: string + parameterScope: + type: string + pdbName: + type: string + pdbState: + enum: + - OPEN + - CLOSE + - ALTER + type: string + pdbconfigmap: + type: string + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + - alterSystemParameter + - alterSystemValue + - webServerPwd + type: object + status: + properties: + action: + type: string + alterSystem: + type: string + bitstat: + type: integer + bitstatstr: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: string + phase: + type: string + sqlCode: + type: integer + status: + type: boolean + totalSize: + type: string + required: + - phase + - sqlCode + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_oraclerestdataservices.yaml 
b/config/crd/bases/database.oracle.com_oraclerestdataservices.yaml index 121383fd..fe93a531 100644 --- a/config/crd/bases/database.oracle.com_oraclerestdataservices.yaml +++ b/config/crd/bases/database.oracle.com_oraclerestdataservices.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: oraclerestdataservices.database.oracle.com spec: group: database.oracle.com @@ -32,30 +30,22 @@ spec: - jsonPath: .status.apexUrl name: Apex URL type: string + - jsonPath: .status.mongoDbApiAccessUrl + name: MongoDbApi Access URL + type: string name: v1alpha1 schema: openAPIV3Schema: - description: OracleRestDataService is the Schema for the oraclerestdataservices - API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: OracleRestDataServiceSpec defines the desired state of OracleRestDataService properties: adminPassword: - description: OracleRestDataServicePassword defines the secret containing - Password mapped to secretKey properties: keepSecret: type: boolean @@ -67,9 +57,30 @@ spec: required: - secretName type: object - apexPassword: - description: OracleRestDataServicePassword defines the secret containing - Password mapped to secretKey + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + mongoDbApi: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + oracleService: + type: string + ordsPassword: properties: keepSecret: type: boolean @@ -81,11 +92,71 @@ spec: required: - secretName type: object + ordsUser: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeName: + type: string + type: object + readinessCheckPeriod: + type: integer + replicas: + minimum: 1 + type: integer + restEnableSchemas: + items: + properties: + enable: + type: boolean + pdbName: + type: string + schemaName: + type: string + urlMapping: + type: string + required: + - enable + - schemaName + type: object + type: array + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + required: + - adminPassword + - databaseRef + - ordsPassword + type: object + status: + properties: + apexConfigured: + type: boolean + apexUrl: + type: string + commonUsersCreated: + type: boolean + databaseActionsUrl: + type: string + databaseApiUrl: + type: 
string databaseRef: type: string image: - description: OracleRestDataServiceImage defines the Image source and - pullSecrets for POD properties: pullFrom: type: string @@ -97,6 +168,84 @@ spec: - pullFrom type: object loadBalancer: + type: string + mongoDbApi: + type: boolean + mongoDbApiAccessUrl: + type: string + ordsInstalled: + type: boolean + replicas: + type: integer + serviceIP: + type: string + status: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .spec.databaseRef + name: Database + type: string + - jsonPath: .status.databaseApiUrl + name: Database API URL + type: string + - jsonPath: .status.databaseActionsUrl + name: Database Actions URL + type: string + - jsonPath: .status.apexUrl + name: Apex URL + type: string + - jsonPath: .status.mongoDbApiAccessUrl + name: MongoDbApi Access URL + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + mongoDbApi: type: boolean nodeSelector: additionalProperties: @@ -105,8 +254,6 @@ spec: oracleService: type: string ordsPassword: - description: OracleRestDataServicePassword defines the secret containing - Password mapped to secretKey properties: keepSecret: type: boolean @@ -121,14 +268,14 @@ spec: ordsUser: type: string persistence: - description: OracleRestDataServicePersistence defines the storage - releated params properties: accessMode: enum: - 
ReadWriteOnce - ReadWriteMany type: string + setWritePermissions: + type: boolean size: type: string storageClass: @@ -136,13 +283,13 @@ spec: volumeName: type: string type: object + readinessCheckPeriod: + type: integer replicas: minimum: 1 type: integer restEnableSchemas: items: - description: OracleRestDataServicePDBSchemas defines the PDB Schemas - to be ORDS Enabled properties: enable: type: boolean @@ -169,8 +316,6 @@ spec: - ordsPassword type: object status: - description: OracleRestDataServiceStatus defines the observed state of - OracleRestDataService properties: apexConfigured: type: boolean @@ -185,8 +330,6 @@ spec: databaseRef: type: string image: - description: OracleRestDataServiceImage defines the Image source and - pullSecrets for POD properties: pullFrom: type: string @@ -199,6 +342,10 @@ spec: type: object loadBalancer: type: string + mongoDbApi: + type: boolean + mongoDbApiAccessUrl: + type: string ordsInstalled: type: boolean replicas: @@ -206,9 +353,6 @@ spec: serviceIP: type: string status: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' type: string type: object type: object @@ -216,9 +360,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_ordssrvs.yaml b/config/crd/bases/database.oracle.com_ordssrvs.yaml new file mode 100644 index 00000000..9c4ab88f --- /dev/null +++ b/config/crd/bases/database.oracle.com_ordssrvs.yaml @@ -0,0 +1,488 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: ordssrvs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: OrdsSrvs + listKind: OrdsSrvsList + plural: ordssrvs + singular: ordssrvs + scope: Namespaced + versions: + - 
additionalPrinterColumns: + - jsonPath: .status.status + name: status + type: string + - jsonPath: .status.workloadType + name: workloadType + type: string + - jsonPath: .status.ordsVersion + name: ordsVersion + type: string + - jsonPath: .status.httpPort + name: httpPort + type: integer + - jsonPath: .status.httpsPort + name: httpsPort + type: integer + - jsonPath: .status.mongoPort + name: MongoPort + type: integer + - jsonPath: .status.restartRequired + name: restartRequired + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .status.ordsInstalled + name: OrdsInstalled + type: boolean + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + encPrivKey: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + forceRestart: + type: boolean + globalSettings: + properties: + cache.metadata.enabled: + type: boolean + cache.metadata.graphql.expireAfterAccess: + format: int64 + type: integer + cache.metadata.graphql.expireAfterWrite: + format: int64 + type: integer + cache.metadata.jwks.enabled: + type: boolean + cache.metadata.jwks.expireAfterAccess: + format: int64 + type: integer + cache.metadata.jwks.expireAfterWrite: + format: int64 + type: integer + cache.metadata.jwks.initialCapacity: + format: int32 + type: integer + cache.metadata.jwks.maximumSize: + format: int32 + type: integer + cache.metadata.timeout: + format: int64 + type: integer + certSecret: + properties: + cert: + type: string + key: + type: string + secretName: + type: string + required: + - cert + - key + - secretName + type: object + database.api.enabled: + type: boolean + database.api.management.services.disabled: + type: boolean + db.invalidPoolTimeout: + format: int64 + type: integer + debug.printDebugToScreen: + type: boolean + enable.mongo.access.log: + default: false + 
type: boolean + enable.standalone.access.log: + default: false + type: boolean + error.responseFormat: + type: string + feature.grahpql.max.nesting.depth: + format: int32 + type: integer + icap.port: + format: int32 + type: integer + icap.secure.port: + format: int32 + type: integer + icap.server: + type: string + log.procedure: + type: boolean + mongo.enabled: + type: boolean + mongo.idle.timeout: + format: int64 + type: integer + mongo.op.timeout: + format: int64 + type: integer + mongo.port: + default: 27017 + format: int32 + type: integer + request.traceHeaderName: + type: string + security.credentials.attempts: + format: int32 + type: integer + security.credentials.lock.time: + format: int64 + type: integer + security.disableDefaultExclusionList: + type: boolean + security.exclusionList: + type: string + security.externalSessionTrustedOrigins: + type: string + security.forceHTTPS: + type: boolean + security.httpsHeaderCheck: + type: string + security.inclusionList: + type: string + security.maxEntries: + format: int32 + type: integer + security.verifySSL: + type: boolean + standalone.context.path: + default: /ords + type: string + standalone.http.port: + default: 8080 + format: int32 + type: integer + standalone.https.host: + type: string + standalone.https.port: + default: 8443 + format: int32 + type: integer + standalone.stop.timeout: + format: int64 + type: integer + type: object + image: + type: string + imagePullPolicy: + default: IfNotPresent + enum: + - IfNotPresent + - Always + - Never + type: string + imagePullSecrets: + type: string + poolSettings: + items: + properties: + apex.security.administrator.roles: + type: string + apex.security.user.roles: + type: string + autoUpgradeAPEX: + default: false + type: boolean + autoUpgradeORDS: + default: false + type: boolean + db.adminUser: + type: string + db.adminUser.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: 
object + db.cdb.adminUser: + type: string + db.cdb.adminUser.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.connectionType: + enum: + - basic + - tns + - customurl + type: string + db.credentialsSource: + enum: + - pool + - request + type: string + db.customURL: + type: string + db.hostname: + type: string + db.poolDestroyTimeout: + format: int64 + type: integer + db.port: + format: int32 + type: integer + db.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.servicename: + type: string + db.sid: + type: string + db.tnsAliasName: + type: string + db.username: + default: ORDS_PUBLIC_USER + type: string + db.wallet.zip.service: + type: string + dbWalletSecret: + properties: + secretName: + type: string + walletName: + type: string + required: + - secretName + - walletName + type: object + debug.trackResources: + type: boolean + feature.openservicebroker.exclude: + type: boolean + feature.sdw: + type: boolean + http.cookie.filter: + type: string + jdbc.DriverType: + enum: + - thin + - oci8 + type: string + jdbc.InactivityTimeout: + format: int32 + type: integer + jdbc.InitialLimit: + format: int32 + type: integer + jdbc.MaxConnectionReuseCount: + format: int32 + type: integer + jdbc.MaxConnectionReuseTime: + format: int32 + type: integer + jdbc.MaxLimit: + format: int32 + type: integer + jdbc.MaxStatementsLimit: + format: int32 + type: integer + jdbc.MinLimit: + format: int32 + type: integer + jdbc.SecondsToTrustIdleConnection: + format: int32 + type: integer + jdbc.auth.admin.role: + type: string + jdbc.auth.enabled: + type: boolean + jdbc.cleanup.mode: + type: string + jdbc.statementTimeout: + format: int32 + type: integer + misc.defaultPage: + type: string + misc.pagination.maxRows: + format: int32 + type: integer + owa.trace.sql: + type: boolean + plsql.gateway.mode: + 
enum: + - disabled + - direct + - proxied + type: string + poolName: + type: string + procedure.preProcess: + type: string + procedure.rest.preHook: + type: string + procedurePostProcess: + type: string + restEnabledSql.active: + type: boolean + security.jwks.connection.timeout: + format: int64 + type: integer + security.jwks.read.timeout: + format: int64 + type: integer + security.jwks.refresh.interval: + format: int64 + type: integer + security.jwks.size: + format: int32 + type: integer + security.jwt.allowed.age: + format: int64 + type: integer + security.jwt.allowed.skew: + format: int64 + type: integer + security.jwt.profile.enabled: + type: boolean + security.requestAuthenticationFunction: + type: string + security.requestValidationFunction: + default: ords_util.authorize_plsql_gateway + type: string + security.validationFunctionType: + enum: + - plsql + - javascript + type: string + soda.defaultLimit: + type: string + soda.maxLimit: + type: string + tnsAdminSecret: + properties: + secretName: + type: string + required: + - secretName + type: object + required: + - db.secret + - poolName + type: object + type: array + replicas: + default: 1 + format: int32 + minimum: 1 + type: integer + workloadType: + default: Deployment + enum: + - Deployment + - StatefulSet + - DaemonSet + type: string + required: + - globalSettings + - image + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - 
reason + - status + - type + type: object + type: array + httpPort: + format: int32 + type: integer + httpsPort: + format: int32 + type: integer + mongoPort: + format: int32 + type: integer + ordsInstalled: + type: boolean + ordsVersion: + type: string + restartRequired: + type: boolean + status: + type: string + workloadType: + type: string + required: + - restartRequired + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/database.oracle.com_pdbs.yaml b/config/crd/bases/database.oracle.com_pdbs.yaml index 85af8c1b..b674f856 100644 --- a/config/crd/bases/database.oracle.com_pdbs.yaml +++ b/config/crd/bases/database.oracle.com_pdbs.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: pdbs.database.oracle.com spec: group: database.oracle.com @@ -17,10 +15,6 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: - - description: The connect string to be used - jsonPath: .status.connString - name: Connect_String - type: string - description: Name of the CDB jsonPath: .spec.cdbName name: CDB Name @@ -45,29 +39,23 @@ spec: jsonPath: .status.msg name: Message type: string - name: v1alpha1 + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v4 schema: openAPIV3Schema: - description: PDB is the Schema for the pdbs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: PDBSpec defines the desired state of PDB properties: action: - description: 'Action to be taken: Create/Clone/Plug/Unplug/Delete/Modify/Status/Map. - Map is used to map a Databse PDB to a Kubernetes PDB CR.' enum: - Create - Clone @@ -79,11 +67,8 @@ spec: - Map type: string adminName: - description: The administrator username for the new PDB. This property - is required when the Action property is Create. properties: secret: - description: PDBSecret defines the secretName properties: key: type: string @@ -97,11 +82,8 @@ spec: - secret type: object adminPwd: - description: The administrator password for the new PDB. This property - is required when the Action property is Create. properties: secret: - description: PDBSecret defines the secretName properties: key: type: string @@ -115,9 +97,8 @@ spec: - secret type: object asClone: - description: Indicate if 'AS CLONE' option should be used in the command - to plug in a PDB. This property is applicable when the Action property - is PLUG but not required. 
+ type: boolean + assertivePdbDeletion: type: boolean assertivePdbDeletion: description: turn on the assertive approach to delete pdb resource @@ -125,38 +106,30 @@ spec: deletion type: boolean cdbName: - description: Name of the CDB + type: string + cdbNamespace: type: string cdbNamespace: description: CDB Namespace type: string cdbResName: - description: Name of the CDB Custom Resource that runs the ORDS container type: string copyAction: - description: To copy files or not while cloning a PDB enum: - COPY - NOCOPY - MOVE type: string dropAction: - description: Specify if datafiles should be removed or not. The value - can be INCLUDING or KEEP (default). enum: - INCLUDING - KEEP type: string fileNameConversions: - description: Relevant for Create and Plug operations. As defined in - the Oracle Multitenant Database documentation. Values can be a - filename convert pattern or NONE. type: string getScript: - description: Whether you need the script only or execute the script type: boolean modifyOption: - description: Extra options for opening and closing a PDB enum: - IMMEDIATE - NORMAL @@ -165,11 +138,38 @@ spec: - RESTRICTED type: string pdbName: - description: The name of the new PDB. Relevant for both Create and - Plug Actions. 
type: string + pdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object pdbState: - description: The target state of the PDB enum: - OPEN - CLOSE @@ -177,7 +177,6 @@ spec: pdbTlsCat: properties: secret: - description: PDBSecret defines the secretName properties: key: type: string @@ -193,7 +192,6 @@ spec: pdbTlsCrt: properties: secret: - description: PDBSecret defines the secretName properties: key: type: string @@ -209,7 +207,6 @@ spec: pdbTlsKey: properties: secret: - description: PDBSecret defines the secretName properties: key: type: string @@ -223,35 +220,22 @@ spec: - secret type: object reuseTempFile: - description: Whether to reuse temp file type: boolean sourceFileNameConversions: - description: This property is required when the Action property is - Plug. As defined in the Oracle Multitenant Database documentation. - Values can be a source filename convert pattern or NONE. type: string sparseClonePath: - description: A Path specified for sparse clone snapshot copy. (Optional) type: string srcPdbName: - description: Name of the Source PDB from which to clone type: string tdeExport: - description: TDE export for unplug operations type: boolean tdeImport: - description: TDE import for plug operations type: boolean tdeKeystorePath: - description: TDE keystore path is required if the tdeImport or tdeExport - flag is set to true. Can be used in plug or unplug operations. type: string tdePassword: - description: TDE password if the tdeImport or tdeExport flag is set - to true. 
Can be used in create, plug or unplug operations properties: secret: - description: PDBSecret defines the secretName properties: key: type: string @@ -265,11 +249,8 @@ spec: - secret type: object tdeSecret: - description: TDE secret is required if the tdeImport or tdeExport - flag is set to true. Can be used in plug or unplug operations. properties: secret: - description: PDBSecret defines the secretName properties: key: type: string @@ -283,26 +264,14 @@ spec: - secret type: object tempSize: - description: Relevant for Create and Clone operations. Total size - for temporary tablespace as defined in the Oracle Multitenant Database - documentation. See size_clause description in Database SQL Language - Reference documentation. type: string totalSize: - description: Relevant for create and plug operations. Total size as - defined in the Oracle Multitenant Database documentation. See size_clause - description in Database SQL Language Reference documentation. type: string unlimitedStorage: - description: Relevant for Create and Plug operations. True for unlimited - storage. Even when set to true, totalSize and tempSize MUST be specified - in the request if Action is Create. 
type: boolean webServerPwd: - description: Password for the Web ServerPDB User properties: secret: - description: PDBSecret defines the secretName properties: key: type: string @@ -316,11 +285,8 @@ spec: - secret type: object webServerUser: - description: Web Server User with SQL Administrator role to allow - us to authenticate to the PDB Lifecycle Management REST endpoints properties: secret: - description: PDBSecret defines the secretName properties: key: type: string @@ -334,37 +300,27 @@ spec: - secret type: object xmlFileName: - description: XML metadata filename to be used for Plug or Unplug operations type: string required: - action type: object status: - description: PDBStatus defines the observed state of PDB properties: action: - description: Last Completed Action type: string connString: - description: PDB Connect String type: string modifyOption: - description: Modify Option of the PDB type: string msg: - description: Message type: string openMode: - description: Open mode of the PDB type: string phase: - description: Phase of the PDB Resource type: string status: - description: PDB Resource Status type: boolean totalSize: - description: Total size of the PDB type: string required: - phase @@ -375,9 +331,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_shardingdatabases.yaml b/config/crd/bases/database.oracle.com_shardingdatabases.yaml index 641629a0..e46d883e 100644 --- a/config/crd/bases/database.oracle.com_shardingdatabases.yaml +++ b/config/crd/bases/database.oracle.com_shardingdatabases.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: shardingdatabases.database.oracle.com spec: group: database.oracle.com @@ -30,33 
+28,22 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: ShardingDatabase is the Schema for the shardingdatabases API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: ShardingDatabaseSpec defines the desired state of ShardingDatabase properties: InvitedNodeSubnet: type: string catalog: items: - description: CatalogSpec defines the desired state of CatalogSpec properties: envVars: items: - description: EnvironmentVariable represents a named variable - accessible for containers. properties: name: type: string @@ -68,8 +55,6 @@ spec: type: object type: array imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull - a container image type: string isDelete: type: string @@ -92,23 +77,13 @@ spec: pvcName: type: string resources: - description: ResourceRequirements describes the compute resource - requirements. properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only - be set for containers." items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. + type: string + request: type: string required: - name @@ -124,8 +99,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -134,11 +107,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object storageSizeInGb: @@ -155,7 +123,6 @@ spec: dbImagePullSecret: type: string dbSecret: - description: Secret Details properties: encryptionType: type: string @@ -183,7 +150,6 @@ spec: type: string gsm: items: - description: GsmSpec defines the desired state of GsmSpec properties: directorName: type: string @@ -192,8 +158,6 @@ spec: Gsm Replicas. If you set OraGsmPvcName then it is set default to 1. items: - description: EnvironmentVariable represents a named variable - accessible for containers. 
properties: name: type: string @@ -205,8 +169,6 @@ spec: type: object type: array imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull - a container image type: string isDelete: type: string @@ -218,6 +180,10 @@ spec: additionalProperties: type: string type: object + pvAnnotations: + additionalProperties: + type: string + type: object pvMatchLabels: additionalProperties: type: string @@ -227,23 +193,13 @@ spec: region: type: string resources: - description: ResourceRequirements describes the compute resource - requirements. properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. It can only - be set for containers." items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. + type: string + request: type: string required: - name @@ -259,8 +215,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -269,11 +223,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. 
If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests - cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object storageSizeInGb: @@ -291,7 +240,6 @@ spec: type: string gsmService: items: - description: Service Definition properties: available: type: string @@ -370,7 +318,6 @@ spec: type: array gsmShardSpace: items: - description: ShardSpace Specs properties: chunks: type: integer @@ -402,18 +349,13 @@ spec: type: string liveinessCheckPeriod: type: integer - namespace: - type: string portMappings: items: - description: PortMapping is a specification of port mapping for - an application deployment. properties: port: format: int32 type: integer protocol: - default: TCP type: string targetPort: format: int32 @@ -431,18 +373,12 @@ spec: scriptsLocation: type: string shard: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - Important: Run "make" to regenerate code after modifying this file' items: - description: ShardSpec is a specification of Shards for an application - deployment. properties: deployAs: type: string envVars: items: - description: EnvironmentVariable represents a named variable - accessible for containers. properties: name: type: string @@ -454,8 +390,6 @@ spec: type: object type: array imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull - a container image type: string isDelete: enum: @@ -483,23 +417,13 @@ spec: pvcName: type: string resources: - description: ResourceRequirements describes the compute resource - requirements. properties: claims: - description: "Claims lists the names of resources, defined - in spec.resourceClaims, that are used by this container. - \n This is an alpha field and requires enabling the DynamicResourceAllocation - feature gate. \n This field is immutable. 
It can only - be set for containers." items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry - in pod.spec.resourceClaims of the Pod where this - field is used. It makes that resource available - inside a container. + type: string + request: type: string required: - name @@ -515,8 +439,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -525,11 +447,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute - resources required. If Requests is omitted for a container, - it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests - cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object shardGroup: @@ -563,6 +480,560 @@ spec: type: string tdeWalletPvcMountLocation: type: string + topicId: + type: string + required: + - catalog + - dbImage + - gsm + - gsmImage + - shard + type: object + status: + properties: + catalogs: + additionalProperties: + type: string + type: object + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + gsm: + properties: + details: + additionalProperties: + type: string + type: object + externalConnectStr: + type: string + internalConnectStr: + type: string + services: + type: string + shards: + additionalProperties: + type: string + type: object + state: + type: string + type: object + shards: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.gsm.state + name: Gsm State + type: string + - jsonPath: .status.gsm.services + name: Services + type: string + - jsonPath: .status.gsm.shards + name: shards + priority: 1 + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + InvitedNodeSubnet: + type: 
string + catalog: + items: + properties: + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + dbEdition: + type: string + dbImage: + type: string + dbImagePullSecret: + type: string + dbSecret: + properties: + encryptionType: + type: string + keyFileMountLocation: + type: string + keyFileName: + type: string + keySecretName: + type: string + name: + type: string + nsConfigMap: + type: string + nsSecret: + type: string + pwdFileMountLocation: + type: string + pwdFileName: + type: string + required: + - name + - pwdFileName + type: object + fssStorageClass: + type: string + gsm: + items: + properties: + directorName: 
+ type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + region: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + gsmDevMode: + type: string + gsmImage: + type: string + gsmImagePullSecret: + type: string + gsmService: + items: + properties: + available: + type: string + clbGoal: + type: string + commitOutcome: + type: string + drainTimeout: + type: string + dtp: + type: string + edition: + type: string + failoverDelay: + type: string + failoverMethod: + type: string + failoverPrimary: + type: string + failoverRestore: + type: string + failoverRetry: + type: string + failoverType: + type: string + gdsPool: + type: string + lag: + type: integer + locality: + type: string + name: + type: string + notification: + type: string 
+ pdbName: + type: string + policy: + type: string + preferred: + type: string + prferredAll: + type: string + regionFailover: + type: string + retention: + type: string + role: + type: string + sessionState: + type: string + sqlTransactionProfile: + type: string + stopOption: + type: string + tableFamily: + type: string + tfaPolicy: + type: string + required: + - name + type: object + type: array + gsmShardGroup: + items: + properties: + deployAs: + type: string + name: + type: string + region: + type: string + required: + - name + type: object + type: array + gsmShardSpace: + items: + properties: + chunks: + type: integer + name: + type: string + protectionMode: + type: string + shardGroup: + type: string + required: + - name + type: object + type: array + invitedNodeSubnetFlag: + type: string + isClone: + type: boolean + isDataGuard: + type: boolean + isDebug: + type: boolean + isDeleteOraPvc: + type: boolean + isDownloadScripts: + type: boolean + isExternalSvc: + type: boolean + isTdeWallet: + type: string + liveinessCheckPeriod: + type: integer + portMappings: + items: + properties: + port: + format: int32 + type: integer + protocol: + type: string + targetPort: + format: int32 + type: integer + required: + - port + - protocol + - targetPort + type: object + type: array + readinessCheckPeriod: + type: integer + replicationType: + type: string + scriptsLocation: + type: string + shard: + items: + properties: + deployAs: + type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + enum: + - enable + - disable + - failed + - force + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: 
string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + shardBuddyRegion: + type: string + shardConfigName: + type: string + shardRegion: + items: + type: string + type: array + shardingType: + type: string + stagePvcName: + type: string + storageClass: + type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string + topicId: + type: string required: - catalog - dbImage @@ -571,8 +1042,6 @@ spec: - shard type: object status: - description: To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 - ShardingDatabaseStatus defines the observed state of ShardingDatabase properties: catalogs: additionalProperties: @@ -580,63 +1049,29 @@ spec: type: object conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. 
\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -680,9 +1115,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml b/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml index 1c011e17..8357f2c5 100644 --- a/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml +++ b/config/crd/bases/database.oracle.com_singleinstancedatabases.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: singleinstancedatabases.database.oracle.com spec: group: database.oracle.com @@ -53,27 +51,16 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: SingleInstanceDatabase is the Schema for the singleinstancedatabases - API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: SingleInstanceDatabaseSpec defines the desired state of SingleInstanceDatabase properties: adminPassword: - description: SingleInsatnceAdminPassword defines the secret containing - Admin Password mapped to secretKey for Database properties: keepSecret: type: boolean @@ -89,14 +76,15 @@ spec: type: boolean charset: type: string + convertToSnapshotStandby: + type: boolean createAs: enum: - primary - standby - clone + - truecache type: string - dgBrokerConfigured: - type: boolean edition: enum: - standard @@ -111,8 +99,6 @@ spec: forceLog: type: boolean image: - description: SingleInstanceDatabaseImage defines the Image source - and pullSecrets for POD properties: prebuiltDB: type: boolean @@ -126,7 +112,6 @@ spec: - pullFrom type: object initParams: - description: SingleInstanceDatabaseInitParams defines the Init Parameters properties: cpuCount: type: integer @@ -148,8 +133,6 @@ spec: pdbName: type: string persistence: - description: SingleInstanceDatabasePersistence defines the storage - size and class for PVC properties: accessMode: enum: @@ -199,8 +182,6 @@ spec: type: string type: object sid: - description: SID must be alphanumeric (no special characters, only - a-z, A-Z, 0-9), and no longer than 12 characters. maxLength: 12 pattern: ^[a-zA-Z0-9]+$ type: string @@ -210,12 +191,14 @@ spec: type: integer tcpsTlsSecret: type: string + trueCacheServices: + items: + type: string + type: array required: - image type: object status: - description: SingleInstanceDatabaseStatus defines the observed state of - SingleInstanceDatabase properties: apexInstalled: type: boolean @@ -233,63 +216,29 @@ spec: type: string conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource. 
--- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. \t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. 
enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -306,6 +255,8 @@ spec: x-kubernetes-list-type: map connectString: type: string + convertToSnapshotStandby: + type: boolean createdAs: type: string datafilesCreated: @@ -314,8 +265,355 @@ spec: datafilesPatched: default: "false" type: string - dgBrokerConfigured: + dgBroker: + type: string + edition: + type: string + flashBack: + type: string + forceLog: + type: string + initParams: + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + initPgaSize: + type: integer + initSgaSize: + type: integer + isTcpsEnabled: + default: false + type: boolean + nodes: + items: + type: string + type: array + oemExpressUrl: + type: string + ordsReference: + type: string + pdbConnectString: + type: string + pdbName: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + prebuiltDB: + type: boolean + primaryDatabase: + type: string + releaseUpdate: + type: string + replicas: + type: integer + role: + type: string + sid: + type: string + standbyDatabases: + additionalProperties: + type: string + type: object + status: + type: string + tcpsConnectString: + type: 
string + tcpsPdbConnectString: + type: string + tcpsTlsSecret: + default: "" + type: string + required: + - isTcpsEnabled + - persistence + - tcpsTlsSecret + type: object + type: object + served: true + storage: false + subresources: + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} + - additionalPrinterColumns: + - jsonPath: .status.edition + name: Edition + type: string + - jsonPath: .status.sid + name: Sid + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.role + name: Role + type: string + - jsonPath: .status.releaseUpdate + name: Version + type: string + - jsonPath: .status.connectString + name: Connect Str + type: string + - jsonPath: .status.pdbConnectString + name: Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.tcpsConnectString + name: TCPS Connect Str + type: string + - jsonPath: .status.tcpsPdbConnectString + name: TCPS Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.oemExpressUrl + name: Oem Express Url + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + archiveLog: + type: boolean + charset: + type: string + convertToSnapshotStandby: + type: boolean + createAs: + enum: + - primary + - standby + - clone + - truecache + type: string + edition: + enum: + - standard + - enterprise + - express + - free + type: string + enableTCPS: + type: boolean + flashBack: + type: boolean + forceLog: + type: boolean + image: + properties: + prebuiltDB: + type: boolean + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + initParams: + properties: + cpuCount: + 
type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + listenerPort: + type: integer + loadBalancer: type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + pdbName: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + primaryDatabaseRef: + type: string + readinessCheckPeriod: + type: integer + replicas: + type: integer + resources: + properties: + limits: + properties: + cpu: + type: string + memory: + type: string + type: object + requests: + properties: + cpu: + type: string + memory: + type: string + type: object + type: object + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + sid: + maxLength: 12 + pattern: ^[a-zA-Z0-9]+$ + type: string + tcpsCertRenewInterval: + type: string + tcpsListenerPort: + type: integer + tcpsTlsSecret: + type: string + trueCacheServices: + items: + type: string + type: array + required: + - image + type: object + status: + properties: + apexInstalled: + type: boolean + archiveLog: + type: string + certCreationTimestamp: + type: string + certRenewInterval: + type: string + charset: + type: string + clientWalletLoc: + type: string + clusterConnectString: + type: string + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: 
^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + connectString: + type: string + convertToSnapshotStandby: + type: boolean + createdAs: + type: string + datafilesCreated: + default: "false" + type: string + datafilesPatched: + default: "false" + type: string + dgBroker: + type: string edition: type: string flashBack: @@ -323,7 +621,6 @@ spec: forceLog: type: string initParams: - description: SingleInstanceDatabaseInitParams defines the Init Parameters properties: cpuCount: type: integer @@ -354,8 +651,6 @@ spec: pdbName: type: string persistence: - description: SingleInstanceDatabasePersistence defines the storage - size and class for PVC properties: accessMode: enum: @@ -413,9 +708,3 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/observability.oracle.com_databaseobservers.yaml b/config/crd/bases/observability.oracle.com_databaseobservers.yaml index b0801738..298f9d4e 100644 --- a/config/crd/bases/observability.oracle.com_databaseobservers.yaml +++ b/config/crd/bases/observability.oracle.com_databaseobservers.yaml @@ -1,11 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.16.5 name: databaseobservers.observability.oracle.com spec: group: observability.oracle.com @@ -13,6 +11,9 @@ spec: kind: DatabaseObserver listKind: DatabaseObserverList plural: databaseobservers + shortNames: + - dbobserver + - dbobservers singular: databaseobserver scope: Namespaced versions: @@ -23,29 +24,4678 @@ 
spec: - jsonPath: .status.status name: Status type: string + - jsonPath: .status.version + name: Version + type: string + name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: 
atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + 
type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - 
keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: 
string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: 
string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: 
false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + 
- type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - 
operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: 
+ default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: 
atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin 
+ type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: 
string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + 
- type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - 
port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + 
properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + 
failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + 
format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string name: v1alpha1 schema: openAPIV3Schema: - description: DatabaseObserver is the Schema for the databaseobservers API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: DatabaseObserverSpec defines the desired state of DatabaseObserver properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + 
supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: 
object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - 
KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: 
+ type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + 
default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: 
boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: 
integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - 
operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: 
+ default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: 
atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin 
+ type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: 
string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + 
- type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - 
port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + 
properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + 
failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + 
format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object database: - description: DatabaseObserverDatabase defines the database details - used for DatabaseObserver properties: dbConnectionString: properties: @@ -81,29 +4731,236 @@ spec: type: object type: object exporter: - description: DatabaseObserverExporterConfig defines the configuration - details related to the exporters of DatabaseObserver properties: - configuration: + deployment: properties: - configmap: - description: ConfigMapDetails defines the configmap name + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + 
image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: properties: - configmapName: - type: string - key: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: 
int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object type: object type: object - image: - type: string service: - description: DatabaseObserverService defines the exporter service - component of DatabaseObserver properties: - port: - format: int32 - type: integer + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object type: object type: object ociConfig: @@ -114,85 +4971,1992 @@ spec: type: string type: object prometheus: - description: PrometheusConfig defines the generated resources for - Prometheus properties: - labels: - additionalProperties: - type: string + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + 
properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: 
string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + 
type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: 
boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object type: object - port: - type: string type: object replicas: format: int32 type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + 
volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + 
accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + 
additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: 
string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + 
secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + 
- key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + 
x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + 
tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + 
properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + 
properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + 
host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array type: object status: - description: DatabaseObserverStatus defines the observed state of DatabaseObserver properties: conditions: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n \ttype FooStatus struct{ \t // Represents the observations - of a foo's current state. 
\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\" \t // - +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map - \t // +listMapKey=type \t Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields - \t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -210,18 +6974,15 @@ spec: type: integer status: type: string + version: + type: string required: - conditions - exporterConfig + - version type: object type: object served: true storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index b9f3aa8c..726521b0 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -18,37 +18,52 @@ resources: - bases/database.oracle.com_dbcssystems.yaml - bases/database.oracle.com_dataguardbrokers.yaml - bases/observability.oracle.com_databaseobservers.yaml +- bases/database.oracle.com_lrests.yaml +- bases/database.oracle.com_lrpdbs.yaml +- bases/database.oracle.com_ordssrvs.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
# patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_provshards.yaml -#- patches/webhook_in_autonomousdatabases.yaml #- patches/webhook_in_singleinstancedatabases.yaml #- patches/webhook_in_shardingdatabases.yaml #- patches/webhook_in_pdbs.yaml #- patches/webhook_in_cdbs.yaml #- patches/webhook_in_oraclerestdataservices.yaml -#- patches/webhook_in_autonomouscontainerdatabases.yaml #- patches/webhook_in_dbcssystems.yaml #- patches/webhook_in_dataguardbrokers.yaml #- patches/webhook_in_databaseobservers.yaml +- patches/webhook_in_autonomousdatabases.yaml +- patches/webhook_in_autonomousdatabasebackups.yaml +- patches/webhook_in_autonomousdatabaserestores.yaml +- patches/webhook_in_autonomouscontainerdatabases.yaml +#- patches/webhook_in_lrests.yaml +#- patches/webhook_in_lrpdbs.yaml +#- patches/webhook_in_ordssrvs.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 
# patches here are for enabling the CA injection for each CRD #- patches/cainjection_in_provshards.yaml -#- patches/cainjection_in_autonomousdatabases.yaml - patches/cainjection_in_singleinstancedatabases.yaml -#- patches/cainjection_in_shardingdatabases.yaml +- patches/cainjection_in_shardingdatabases.yaml - patches/cainjection_in_pdbs.yaml - patches/cainjection_in_cdbs.yaml #- patches/cainjection_in_oraclerestdataservices.yaml #- patches/cainjection_in_autonomouscontainerdatabases.yaml -#- patches/cainjection_in_dbcssystems.yaml +- patches/cainjection_in_dbcssystems.yaml #- patches/cainjection_in_dataguardbrokers.yaml #- patches/cainjection_in_databaseobservers.yaml +- patches/cainjection_in_autonomousdatabases.yaml +- patches/cainjection_in_autonomousdatabasebackups.yaml +- patches/cainjection_in_autonomousdatabaserestores.yaml +- patches/cainjection_in_autonomouscontainerdatabases.yaml +#- patches/cainjection_in_lrests.yaml +#- patches/cainjection_in_lrpdbs.yaml +#- patches/cainjection_in_ordssrvs.yaml +#- patches/cainjection_in_singleinstancedatabases.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_autonomouscontainerdatabases.yaml b/config/crd/patches/cainjection_in_autonomouscontainerdatabases.yaml index 3985a5ae..734407bc 100644 --- a/config/crd/patches/cainjection_in_autonomouscontainerdatabases.yaml +++ b/config/crd/patches/cainjection_in_autonomouscontainerdatabases.yaml @@ -1,6 +1,6 @@ # The following patch adds a directive for certmanager to inject CA into the CRD # CRD conversion requires k8s 1.13 or later. 
-apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: diff --git a/config/crd/patches/cainjection_in_autonomousdatabasebackups.yaml b/config/crd/patches/cainjection_in_autonomousdatabasebackups.yaml index 78280137..9468569d 100644 --- a/config/crd/patches/cainjection_in_autonomousdatabasebackups.yaml +++ b/config/crd/patches/cainjection_in_autonomousdatabasebackups.yaml @@ -1,6 +1,6 @@ # The following patch adds a directive for certmanager to inject CA into the CRD # CRD conversion requires k8s 1.13 or later. -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: diff --git a/config/crd/patches/cainjection_in_autonomousdatabaserestores.yaml b/config/crd/patches/cainjection_in_autonomousdatabaserestores.yaml index 75894cbb..cfc941f8 100644 --- a/config/crd/patches/cainjection_in_autonomousdatabaserestores.yaml +++ b/config/crd/patches/cainjection_in_autonomousdatabaserestores.yaml @@ -1,6 +1,6 @@ # The following patch adds a directive for certmanager to inject CA into the CRD # CRD conversion requires k8s 1.13 or later. -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: diff --git a/config/crd/patches/cainjection_in_database_dataguardbrokers.yaml b/config/crd/patches/cainjection_in_database_dataguardbrokers.yaml new file mode 100644 index 00000000..6409f54c --- /dev/null +++ b/config/crd/patches/cainjection_in_database_dataguardbrokers.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: dataguardbrokers.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_lrests.yaml b/config/crd/patches/cainjection_in_database_lrests.yaml new file mode 100644 index 00000000..22f4b410 --- /dev/null +++ b/config/crd/patches/cainjection_in_database_lrests.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: lrests.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_lrpdbs.yaml b/config/crd/patches/cainjection_in_database_lrpdbs.yaml new file mode 100644 index 00000000..f6f21f4c --- /dev/null +++ b/config/crd/patches/cainjection_in_database_lrpdbs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: lrpdbs.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_oraclerestdataservices.yaml b/config/crd/patches/cainjection_in_database_oraclerestdataservices.yaml new file mode 100644 index 00000000..d2b5d4ee --- /dev/null +++ b/config/crd/patches/cainjection_in_database_oraclerestdataservices.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: oraclerestdataservices.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_ordssrvs.yaml b/config/crd/patches/cainjection_in_database_ordssrvs.yaml new file mode 100644 index 00000000..d2bfc8bf --- /dev/null +++ b/config/crd/patches/cainjection_in_database_ordssrvs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: ordssrvs.database.oracle.com diff --git a/config/crd/patches/cainjection_in_database_singleinstancedatabases.yaml b/config/crd/patches/cainjection_in_database_singleinstancedatabases.yaml new file mode 100644 index 00000000..b87b9351 --- /dev/null +++ b/config/crd/patches/cainjection_in_database_singleinstancedatabases.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: singleinstancedatabases.database.oracle.com diff --git a/config/crd/patches/cainjection_in_dbcssystems.yaml b/config/crd/patches/cainjection_in_dbcssystems.yaml index 9d8521ac..1c14e1fd 100644 --- a/config/crd/patches/cainjection_in_dbcssystems.yaml +++ b/config/crd/patches/cainjection_in_dbcssystems.yaml @@ -1,6 +1,6 @@ # The following patch adds a directive for certmanager to inject CA into the CRD # CRD conversion requires k8s 1.13 or later. 
-apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: diff --git a/config/crd/patches/cainjection_in_observability_databaseobservers.yaml b/config/crd/patches/cainjection_in_observability_databaseobservers.yaml new file mode 100644 index 00000000..bef0b6c0 --- /dev/null +++ b/config/crd/patches/cainjection_in_observability_databaseobservers.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: databaseobservers.observability.oracle.com diff --git a/config/crd/patches/webhook_in_autonomouscontainerdatabases.yaml b/config/crd/patches/webhook_in_autonomouscontainerdatabases.yaml index 03a73384..6ef8f0a6 100644 --- a/config/crd/patches/webhook_in_autonomouscontainerdatabases.yaml +++ b/config/crd/patches/webhook_in_autonomouscontainerdatabases.yaml @@ -1,17 +1,19 @@ # The following patch enables conversion webhook for CRD # CRD conversion requires k8s 1.13 or later. 
-apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: autonomouscontainerdatabases.database.oracle.com spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: oracle-database-operator-system + name: oracle-database-operator-webhook-service + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 diff --git a/config/crd/patches/webhook_in_autonomousdatabasebackups.yaml b/config/crd/patches/webhook_in_autonomousdatabasebackups.yaml index 1a4eacb6..ee363f8f 100644 --- a/config/crd/patches/webhook_in_autonomousdatabasebackups.yaml +++ b/config/crd/patches/webhook_in_autonomousdatabasebackups.yaml @@ -1,17 +1,19 @@ # The following patch enables conversion webhook for CRD # CRD conversion requires k8s 1.13 or later. 
-apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: autonomousdatabasebackups.database.oracle.com spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: oracle-database-operator-system + name: oracle-database-operator-webhook-service + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 \ No newline at end of file diff --git a/config/crd/patches/webhook_in_autonomousdatabaserestores.yaml b/config/crd/patches/webhook_in_autonomousdatabaserestores.yaml index 0a0ed4ad..33329655 100644 --- a/config/crd/patches/webhook_in_autonomousdatabaserestores.yaml +++ b/config/crd/patches/webhook_in_autonomousdatabaserestores.yaml @@ -1,17 +1,19 @@ # The following patch enables conversion webhook for CRD # CRD conversion requires k8s 1.13 or later. 
-apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: autonomousdatabaserestores.database.oracle.com spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: oracle-database-operator-system + name: oracle-database-operator-webhook-service + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 \ No newline at end of file diff --git a/config/crd/patches/webhook_in_autonomousdatabases.yaml b/config/crd/patches/webhook_in_autonomousdatabases.yaml index 230f9f68..c7ec554f 100644 --- a/config/crd/patches/webhook_in_autonomousdatabases.yaml +++ b/config/crd/patches/webhook_in_autonomousdatabases.yaml @@ -11,11 +11,13 @@ metadata: spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: oracle-database-operator-system + name: oracle-database-operator-webhook-service + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 diff --git a/config/crd/patches/webhook_in_lrests.yaml b/config/crd/patches/webhook_in_lrests.yaml new file mode 100644 index 00000000..01afd4b5 --- /dev/null +++ b/config/crd/patches/webhook_in_lrests.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: lrests.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_lrpdbs.yaml b/config/crd/patches/webhook_in_lrpdbs.yaml new file mode 100644 index 00000000..4120e72f --- /dev/null +++ b/config/crd/patches/webhook_in_lrpdbs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: lrpdbs.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_ordssrvs.yaml b/config/crd/patches/webhook_in_ordssrvs.yaml new file mode 100644 index 00000000..0c3d7637 --- /dev/null +++ b/config/crd/patches/webhook_in_ordssrvs.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ordssrvs.database.oracle.com +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/database.oracle.com_DbcsSystem.yaml b/config/database.oracle.com_DbcsSystem.yaml index e933d5a4..c342c363 100644 --- a/config/database.oracle.com_DbcsSystem.yaml +++ b/config/database.oracle.com_DbcsSystem.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 + controller-gen.kubebuilder.io/version: v0.16.5 creationTimestamp: null name: DbcsSystem.database.oracle.com spec: @@ -16,7 +16,7 @@ spec: singular: dbcssystem scope: Namespaced versions: - - name: v1alpha1 + - name: v4 schema: openAPIV3Schema: description: DbcsSystem is the Schema for the dbcssystems API @@ -36,6 +36,53 @@ spec: spec: description: DbcsSystemSpec defines the desired state of DbcsSystem properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + description: DbCloneConfig defines the configuration for the database + clone + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - 
displayName + - hostName + - subnetId + type: object dbSystem: properties: availabilityDomain: @@ -51,7 +98,7 @@ spec: dbAdminPaswordSecret: type: string dbBackupConfig: - description: DB Backup COnfig Network Struct + description: DB Backup Config Network Struct properties: autoBackupEnabled: type: boolean @@ -88,10 +135,19 @@ spec: type: string initialDataStorageSizeInGB: type: integer - kmsKeyId: - type: string - kmsKeyVersionId: - type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object licenseModel: type: string nodeCount: @@ -124,17 +180,80 @@ spec: - dbAdminPaswordSecret - hostName - shape - - sshPublicKeys - subnetId type: object hardLink: type: boolean id: type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object ociConfigMap: type: string ociSecret: type: string + pdbConfigs: + items: + description: PDBConfig defines details of PDB struct for DBCS systems + properties: + freeformTags: + additionalProperties: + type: string + description: '// Free-form tags for this resource. Each tag + is a simple key-value pair with no predefined name, type, + or namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). + // Example: `{"Department": "Finance"}`' + type: object + isDelete: + description: To specify whether to delete the PDB + type: boolean + pdbAdminPassword: + description: // A strong password for PDB Admin. The password + must be at least nine characters and contain at least two + uppercase, two lowercase, two numbers, and two special characters. + The special characters must be _, \#, or -. + type: string + pdbName: + description: The name for the pluggable database (PDB). 
The + name is unique in the context of a Database. The name must + begin with an alphabetic character and can contain a maximum + of thirty alphanumeric characters. Special characters are + not permitted. The pluggable database name should not be same + as the container database name. + type: string + pluggableDatabaseId: + description: The OCID of the PDB for deletion purposes. + type: string + shouldPdbAdminAccountBeLocked: + description: // The locked mode of the pluggable database admin + account. If false, the user needs to provide the PDB Admin + Password to connect to it. // If true, the pluggable database + will be locked and user cannot login to it. + type: boolean + tdeWalletPassword: + description: // The existing TDE wallet password of the CDB. + type: string + required: + - freeformTags + - pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean required: - ociConfigMap type: object @@ -149,6 +268,35 @@ spec: type: integer dataStorageSizeInGBs: type: integer + dbCloneStatus: + description: DbCloneStatus defines the observed state of DbClone + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object dbEdition: type: string dbInfo: @@ -171,6 +319,25 @@ spec: type: string id: type: string + kmsDetailsStatus: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: + type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object licenseModel: type: string network: @@ -192,6 +359,32 @@ spec: type: object 
nodeCount: type: integer + pdbDetailsStatus: + items: + properties: + pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + required: + - freeformTags + - pdbName + - shouldPdbAdminAccountBeLocked + type: object + type: array + type: object + type: array recoStorageSizeInGB: type: integer shape: diff --git a/config/database.oracle.com_shardingdatabases.yaml b/config/database.oracle.com_shardingdatabases.yaml index 641629a0..bb9bbd38 100644 --- a/config/database.oracle.com_shardingdatabases.yaml +++ b/config/database.oracle.com_shardingdatabases.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 + controller-gen.kubebuilder.io/version: v0.16.5 creationTimestamp: null name: shardingdatabases.database.oracle.com spec: @@ -27,7 +27,7 @@ spec: name: shards priority: 1 type: string - name: v1alpha1 + name: v4 schema: openAPIV3Schema: description: ShardingDatabase is the Schema for the shardingdatabases API diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 2aed83d4..1a9d97d3 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -8,5 +8,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: container-registry.oracle.com/database/operator + newName: lin.ocir.io/intsanjaysingh/mmalvezz/testppr/operatormntnns newTag: latest diff --git a/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml b/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml index 23cd7c00..933a2bfa 100644 --- a/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml +++ 
b/config/manifests/bases/oracle-database-operator.clusterserviceversion.yaml @@ -16,7 +16,7 @@ spec: displayName: Dbcs System kind: DbcsSystem name: DbcsSystem.database.oracle.com - version: v1alpha1 + version: v4 - description: AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases API displayName: Autonomous Container Database @@ -70,7 +70,7 @@ spec: displayName: Sharding Database kind: ShardingDatabase name: shardingdatabases.database.oracle.com - version: v1alpha1 + version: v4 - description: SingleInstanceDatabase is the Schema for the singleinstancedatabases API displayName: Single Instance Database diff --git a/config/observability.oracle.com_databaseobservers.yaml b/config/observability.oracle.com_databaseobservers.yaml index b0801738..c69a3b99 100644 --- a/config/observability.oracle.com_databaseobservers.yaml +++ b/config/observability.oracle.com_databaseobservers.yaml @@ -1,4 +1,3 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -23,6 +22,3181 @@ spec: - jsonPath: .status.status name: Status type: string + - jsonPath: .status.version + name: Version + type: string + name: v1 + schema: + openAPIV3Schema: + description: DatabaseObserver is the Schema for the databaseobservers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseObserverSpec defines the desired state of DatabaseObserver + properties: + configuration: + properties: + configMap: + description: ConfigMapDetails defines the configmap name + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + description: DatabaseObserverDatabase defines the database details + used for DatabaseObserver + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbRole: + type: string + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + description: DatabaseObserverExporterConfig defines the configuration + details related to the exporters of DatabaseObserver + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + image: + type: string + service: + description: DatabaseObserverService defines the exporter service + component of DatabaseObserver + properties: + port: + format: int32 + type: integer + type: object + type: object + inherit_labels: + items: + type: string + type: array + log: + description: LogConfig defines the configuration details relation + to the logs of DatabaseObserver + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + 
prometheus: + description: PrometheusConfig defines the generated resources for + Prometheus + properties: + port: + type: string + release: + type: string + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. 
+ properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). 
+ properties: + driver: + description: driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644.
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. 
The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. 
\n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. 
If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. 
If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. 
More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used + to set the VolumeAttributesClass used by this + claim. If specified, the CSI driver will create + or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This + has a different purpose than storageClassName, + it can be changed after the claim is created. + An empty string value means that no VolumeAttributesClass + will be applied to the claim but it''s not allowed + to reset this field to empty string once it is + set. If unspecified and the PersistentVolumeClaim + is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller + if it exists. If the resource referred to by volumeAttributesClass + does not exist, this PersistentVolumeClaim will + be set to a Pending state, as reflected by the + modifyVolumeStatus field, until such a resource + exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass + feature gate to be enabled.' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified.
TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: "ClusterTrustBundle allows a pod to access + the `.spec.trustBundle` field of ClusterTrustBundle + objects in an auto-updating file. \n Alpha, gated + by the ClusterTrustBundleProjection feature gate. + \n ClusterTrustBundle objects can either be selected + by name, or by the combination of signer name and + a label selector. 
\n Kubelet performs aggressive + normalization of the PEM contents written into the + pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates + are deduplicated. The ordering of certificates within + the file is arbitrary, and Kubelet may change the + order over time." + properties: + labelSelector: + description: Select all ClusterTrustBundles that + match this label selector. Only has effect + if signerName is set. Mutually-exclusive with + name. If unset, interpreted as "match nothing". If + set but empty, interpreted as "match everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. 
Mutually-exclusive with signerName + and labelSelector. + type: string + optional: + description: If true, don't block pod startup + if the referenced ClusterTrustBundle(s) aren't + available. If using name, then the named ClusterTrustBundle + is allowed not to exist. If using signerName, + then the combination of signerName and labelSelector + is allowed to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles that + match this signer name. Mutually-exclusive with + name. The contents of all selected ClusterTrustBundles + will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. 
This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. 
+ Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. 
+ format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. 
More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. 
+ type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. 
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. 
More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. 
+ properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. 
TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. 
It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. 
This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. + Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. 
+ type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. 
If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + type: object + status: + description: DatabaseObserverStatus defines the observed state of DatabaseObserver + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -43,6 +3217,3189 @@ spec: spec: description: DatabaseObserverSpec defines the desired state of DatabaseObserver properties: + configuration: + properties: + configMap: + description: ConfigMapDetails defines the configmap name + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + description: DatabaseObserverDatabase defines the database details + used for DatabaseObserver + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbRole: + type: string + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + description: DatabaseObserverExporterConfig defines the configuration + details related to the exporters of DatabaseObserver + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + 
type: array + image: + type: string + service: + description: DatabaseObserverService defines the exporter service + component of DatabaseObserver + properties: + port: + format: int32 + type: integer + type: object + type: object + inherit_labels: + items: + type: string + type: array + log: + description: LogConfig defines the configuration details relation + to the logs of DatabaseObserver + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + description: PrometheusConfig defines the generated resources for + Prometheus + properties: + port: + type: string + release: + type: string + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. 
Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). 
ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + user: + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. 
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. 
Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. 
+ The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. 
Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. 
If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' 
+ properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used + to set the VolumeAttributesClass used by this + claim. If specified, the CSI driver will create + or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This + has a different purpose than storageClassName, + it can be changed after the claim is created. + An empty string value means that no VolumeAttributesClass + will be applied to the claim but it''s not allowed + to reset this field to empty string once it is + set. If unspecified and the PersistentVolumeClaim + is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller + if it exists. If the resource referred to by volumeAttributesClass + does not exist, this PersistentVolumeClaim will + be set to a Pending state, as reflected by the + modifyVolumeStatus field, until such as a resource + exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass + feature gate to be enabled.' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. 
+ type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. 
Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. 
More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: "ClusterTrustBundle allows a pod to access + the `.spec.trustBundle` field of ClusterTrustBundle + objects in an auto-updating file. \n Alpha, gated + by the ClusterTrustBundleProjection feature gate. + \n ClusterTrustBundle objects can either be selected + by name, or by the combination of signer name and + a label selector. 
\n Kubelet performs aggressive + normalization of the PEM contents written into the + pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates + are deduplicated. The ordering of certificates within + the file is arbitrary, and Kubelet may change the + order over time." + properties: + labelSelector: + description: Select all ClusterTrustBundles that + match this label selector. Only has effect + if signerName is set. Mutually-exclusive with + name. If unset, interpreted as "match nothing". If + set but empty, interpreted as "match everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. 
Mutually-exclusive with signerName + and labelSelector. + type: string + optional: + description: If true, don't block pod startup + if the referenced ClusterTrustBundle(s) aren't + available. If using name, then the named ClusterTrustBundle + is allowed not to exist. If using signerName, + then the combination of signerName and labelSelector + is allowed to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles that + match this signer name. Mutually-exclusive with + name. The contents of all selected ClusterTrustBundles + will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. 
This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. 
+ Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. 
+ format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. 
More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. 
+ type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. 
Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. 
More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. 
+ properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. 
TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. 
+ Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. 
It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. 
This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. + Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. 
+ type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. 
If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + type: object + status: + description: DatabaseObserverStatus defines the observed state of DatabaseObserver + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v4 + schema: + openAPIV3Schema: + description: DatabaseObserver is the Schema for the databaseobservers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseObserverSpec defines the desired state of DatabaseObserver + properties: + configuration: + properties: + configMap: + description: ConfigMapDetails defines the configmap name + properties: + key: + type: string + name: + type: string + type: object + type: object database: description: DatabaseObserverDatabase defines the database details used for DatabaseObserver @@ -65,6 +6422,8 @@ spec: vaultSecretName: type: string type: object + dbRole: + type: string dbUser: properties: key: @@ -84,17 +6443,14 @@ spec: description: DatabaseObserverExporterConfig defines the configuration details related to the exporters of DatabaseObserver properties: - configuration: - properties: - configmap: - description: ConfigMapDetails defines the configmap name - properties: - configmapName: - type: string - key: - type: string - type: object - type: object + args: + items: + type: string + type: array + commands: + items: + type: string + type: array image: type: string service: @@ -106,6 +6462,29 @@ spec: type: integer type: object type: object + inherit_labels: + items: + type: string + type: array + log: + description: LogConfig defines the configuration details relation + to the logs of DatabaseObserver + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object ociConfig: properties: configMapName: @@ -117,24 +6496,2954 @@ spec: description: PrometheusConfig defines the generated resources for Prometheus properties: - labels: - additionalProperties: - type: string - type: object port: type: string + release: + type: string type: object replicas: format: int32 type: integer + sidecarVolumes: + items: + description: Volume represents a named 
volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). 
+ ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. 
If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. 
YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." 
+ properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. 
For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. 
+ properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeAttributesClassName: + description: 'volumeAttributesClassName may be used + to set the VolumeAttributesClass used by this + claim. If specified, the CSI driver will create + or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This + has a different purpose than storageClassName, + it can be changed after the claim is created. + An empty string value means that no VolumeAttributesClass + will be applied to the claim but it''s not allowed + to reset this field to empty string once it is + set. If unspecified and the PersistentVolumeClaim + is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller + if it exists. 
If the resource referred to by volumeAttributesClass + does not exist, this PersistentVolumeClaim will + be set to a Pending state, as reflected by the + modifyVolumeStatus field, until such as a resource + exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass + feature gate to be enabled.' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' 
+ items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. 
This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' 
+ properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). 
+ items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. 
Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: "ClusterTrustBundle allows a pod to access + the `.spec.trustBundle` field of ClusterTrustBundle + objects in an auto-updating file. \n Alpha, gated + by the ClusterTrustBundleProjection feature gate. + \n ClusterTrustBundle objects can either be selected + by name, or by the combination of signer name and + a label selector. \n Kubelet performs aggressive + normalization of the PEM contents written into the + pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates + are deduplicated. The ordering of certificates within + the file is arbitrary, and Kubelet may change the + order over time." + properties: + labelSelector: + description: Select all ClusterTrustBundles that + match this label selector. Only has effect + if signerName is set. Mutually-exclusive with + name. If unset, interpreted as "match nothing". If + set but empty, interpreted as "match everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + name: + description: Select a single ClusterTrustBundle + by object name. Mutually-exclusive with signerName + and labelSelector. + type: string + optional: + description: If true, don't block pod startup + if the referenced ClusterTrustBundle(s) aren't + available. If using name, then the named ClusterTrustBundle + is allowed not to exist. If using signerName, + then the combination of signerName and labelSelector + is allowed to match zero ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: Select all ClusterTrustBundles that + match this signer name. Mutually-exclusive with + name. The contents of all selected ClusterTrustBundles + will be unified and deduplicated. 
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. 
May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. 
+ type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. 
If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. 
+ items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. 
Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to + sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. 
+ properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. 
+ properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. 
+ Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. 
Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. 
spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only + be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of + individual containers in a pod. This field may only be set + for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod''s restart policy + and the container type. Setting the RestartPolicy as "Always" + for the init container will have the following effect: this + init container will be continually restarted on exit until + all regular containers have terminated. Once all regular containers + have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init + containers and is often referred to as a "sidecar" container. 
+ Although this init container still starts in the init container + sequence, it does not wait for the container to complete before + proceeding to the next init container. Instead, the next init + container starts immediately after this init container is + started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. 
The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must be set if type is "Localhost". + Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. 
+ properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. All of a Pod's + containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess + containers and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must also + be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name. This will + be canonicalized upon output, so case-variant + names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. 
+ type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array type: object status: description: DatabaseObserverStatus defines the observed state of DatabaseObserver properties: conditions: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' items: description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct @@ -210,9 +9519,12 @@ spec: type: integer status: type: string + version: + type: string required: - conditions - exporterConfig + - version type: object type: object served: true diff --git a/config/rbac/lrest_editor_role.yaml b/config/rbac/lrest_editor_role.yaml new file mode 100644 index 00000000..7f5b2f01 --- /dev/null +++ b/config/rbac/lrest_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit lrests. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lrest-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - lrests + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - lrests/status + verbs: + - get diff --git a/config/rbac/lrest_viewer_role.yaml b/config/rbac/lrest_viewer_role.yaml new file mode 100644 index 00000000..d74bc977 --- /dev/null +++ b/config/rbac/lrest_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view lrests. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lrest-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - lrests + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - lrests/status + verbs: + - get diff --git a/config/rbac/lrpdb_editor_role.yaml b/config/rbac/lrpdb_editor_role.yaml new file mode 100644 index 00000000..20ae714a --- /dev/null +++ b/config/rbac/lrpdb_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit lrpdbs. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lrpdb-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - lrpdbs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - lrpdbs/status + verbs: + - get diff --git a/config/rbac/lrpdb_viewer_role.yaml b/config/rbac/lrpdb_viewer_role.yaml new file mode 100644 index 00000000..95bcaab5 --- /dev/null +++ b/config/rbac/lrpdb_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view lrpdbs. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: lrpdb-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - lrpdbs + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - lrpdbs/status + verbs: + - get diff --git a/config/rbac/ordssrvs_editor_role.yaml b/config/rbac/ordssrvs_editor_role.yaml new file mode 100644 index 00000000..bc4170f6 --- /dev/null +++ b/config/rbac/ordssrvs_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit ordssrvs. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ordssrvs-editor-role +rules: +- apiGroups: + - database.oracle.com + resources: + - ordssrvs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - database.oracle.com + resources: + - ordssrvs/status + verbs: + - get diff --git a/config/rbac/ordssrvs_viewer_role.yaml b/config/rbac/ordssrvs_viewer_role.yaml new file mode 100644 index 00000000..8880c17d --- /dev/null +++ b/config/rbac/ordssrvs_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view ordssrvs. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ordssrvs-viewer-role +rules: +- apiGroups: + - database.oracle.com + resources: + - ordssrvs + verbs: + - get + - list + - watch +- apiGroups: + - database.oracle.com + resources: + - ordssrvs/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 2f33c915..3a12386c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,48 +1,24 @@ - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: - "" resources: - configmaps + - containers + - deployments - events + - namespaces + - persistentvolumeclaims - pods - pods/exec - pods/log - replicasets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - secrets + - services verbs: - create - delete @@ -54,49 +30,29 @@ rules: - apiGroups: - "" resources: - - deployments - - events - - pods - - services + - configmaps/status + - daemonsets/status + - deployments/status + - services/status + - statefulsets/status verbs: - - create - - delete - get - - list - patch - update - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - apiGroups: - "" resources: - - persistentvolumeclaims - - pods - - pods/exec - - pods/log - - services + - persistentvolumes verbs: - - create - - delete - get - list - - patch - - update - watch - apiGroups: - "" resources: - - persistentvolumes + - secrets/status verbs: - get - - list - - watch - apiGroups: - '''''' resources: @@ -119,32 +75,11 @@ rules: - apiGroups: - apps resources: + - daemonsets - deployments - pods - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - 
- apps - resources: - replicasets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: + - services - statefulsets verbs: - create @@ -163,73 +98,22 @@ rules: - get - list - update -- apiGroups: - - "" - resources: - - configmaps - - containers - - events - - namespaces - - persistentvolumeclaims - - pods - - pods/exec - - pods/log - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - - containers - - events - - namespaces - - pods - - pods/exec - - pods/log - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - apiGroups: - database.oracle.com resources: - autonomouscontainerdatabases + - autonomousdatabases + - cdbs + - dataguardbrokers + - dbcssystems + - events + - lrests + - lrpdbs + - oraclerestdataservices + - ordssrvs + - pdbs + - shardingdatabases + - singleinstancedatabases verbs: - create - delete @@ -242,44 +126,18 @@ rules: - database.oracle.com resources: - autonomouscontainerdatabases/status - verbs: - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - autonomousdatabasebackups - verbs: - - create - - delete - - get - - list - - update - - watch -- apiGroups: - - database.oracle.com - resources: - autonomousdatabasebackups/status - verbs: - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - autonomousdatabaserestores - verbs: - - create - - delete - - get - - list - - update - - watch -- apiGroups: - - database.oracle.com - resources: - autonomousdatabaserestores/status + - cdbs/status + - dataguardbrokers/status + - dbcssystems/status + - 
lrests/status + - lrpdbs/status + - oraclerestdataservices/status + - ordssrvs/status + - pdbs/status + - shardingdatabases/status + - singleinstancedatabases/status verbs: - get - patch @@ -287,13 +145,13 @@ rules: - apiGroups: - database.oracle.com resources: - - autonomousdatabases + - autonomousdatabasebackups + - autonomousdatabaserestores verbs: - create - delete - get - list - - patch - update - watch - apiGroups: @@ -303,159 +161,23 @@ rules: verbs: - patch - update -- apiGroups: - - database.oracle.com - resources: - - cdbs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - database.oracle.com resources: - cdbs/finalizers - verbs: - - update -- apiGroups: - - database.oracle.com - resources: - - cdbs/status - verbs: - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - dataguardbrokers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - database.oracle.com - resources: - dataguardbrokers/finalizers - verbs: - - update -- apiGroups: - - database.oracle.com - resources: - - dataguardbrokers/status - verbs: - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - dbcssystems - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - database.oracle.com - resources: - - dbcssystems/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - dbcssystems/status - verbs: - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - oraclerestdataservices - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - database.oracle.com - resources: + - lrests/finalizers - oraclerestdataservices/finalizers + - ordssrvs/finalizers + - singleinstancedatabases/finalizers verbs: - update - apiGroups: - database.oracle.com resources: - - oraclerestdataservices/status - 
verbs: - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - pdbs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - database.oracle.com - resources: + - dbcssystems/finalizers + - lrpdbs/finalizers - pdbs/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - pdbs/status - verbs: - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - shardingdatabases - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - database.oracle.com - resources: - shardingdatabases/finalizers verbs: - create @@ -463,40 +185,6 @@ rules: - get - patch - update -- apiGroups: - - database.oracle.com - resources: - - shardingdatabases/status - verbs: - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - singleinstancedatabases - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - database.oracle.com - resources: - - singleinstancedatabases/finalizers - verbs: - - update -- apiGroups: - - database.oracle.com - resources: - - singleinstancedatabases/status - verbs: - - get - - patch - - update - apiGroups: - monitoring.coreos.com resources: diff --git a/config/samples/adb/autonomousdatabase_bind.yaml b/config/samples/adb/autonomousdatabase_bind.yaml index 8d1de0fe..702b8f03 100644 --- a/config/samples/adb/autonomousdatabase_bind.yaml +++ b/config/samples/adb/autonomousdatabase_bind.yaml @@ -7,8 +7,9 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Sync details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
ociConfig: configMapName: oci-cred diff --git a/config/samples/adb/autonomousdatabase_clone.yaml b/config/samples/adb/autonomousdatabase_clone.yaml new file mode 100644 index 00000000..559d7185 --- /dev/null +++ b/config/samples/adb/autonomousdatabase_clone.yaml @@ -0,0 +1,35 @@ +# +# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# +apiVersion: database.oracle.com/v1alpha1 +kind: AutonomousDatabase +metadata: + name: autonomousdatabase-sample +spec: + action: Clone + details: + id: ocid1.autonomousdatabase... + clone: + # Update compartmentOCID with your compartment OCID. + compartmentId: ocid1.compartment... OR ocid1.tenancy... + # The dbName must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. + dbName: ClonedADB + displayName: ClonedADB + cpuCoreCount: 1 + adminPassword: + # Comment out k8sSecret and uncomment ociSecret if you pass the admin password using OCI Secret. + k8sSecret: + # The Name of the K8s secret where you want to hold the password of the ADMIN account. + name: admin-password + # ociSecret: + # # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . + # id: ocid1.vaultsecret... + dataStorageSizeInTBs: 1 + dbWorkload: OLTP + cloneType: METADATA + # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
+ ociConfig: + configMapName: oci-cred + # Comment out secretName if using OKE workload identity + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/adb/autonomousdatabase_create.yaml b/config/samples/adb/autonomousdatabase_create.yaml index aa77a94c..d633cb84 100644 --- a/config/samples/adb/autonomousdatabase_create.yaml +++ b/config/samples/adb/autonomousdatabase_create.yaml @@ -7,9 +7,10 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Create details: # Update compartmentOCID with your compartment OCID. - compartmentOCID: ocid1.compartment... OR ocid1.tenancy... + compartmentId: ocid1.compartment... OR ocid1.tenancy... # The dbName must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. dbName: NewADB displayName: NewADB diff --git a/config/samples/adb/autonomousdatabase_delete_resource.yaml b/config/samples/adb/autonomousdatabase_delete_resource.yaml index 60a8fe5c..bae1f605 100644 --- a/config/samples/adb/autonomousdatabase_delete_resource.yaml +++ b/config/samples/adb/autonomousdatabase_delete_resource.yaml @@ -8,7 +8,7 @@ metadata: name: autonomousdatabase-sample spec: details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... # Delete this resource to terminate database after the changes applied hardLink: true # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
diff --git a/config/samples/adb/autonomousdatabase_rename.yaml b/config/samples/adb/autonomousdatabase_rename.yaml index d3a29998..22dbcc0f 100644 --- a/config/samples/adb/autonomousdatabase_rename.yaml +++ b/config/samples/adb/autonomousdatabase_rename.yaml @@ -7,8 +7,9 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... # The database name dbName: RenamedADB # The user-friendly name for the Autonomous Database diff --git a/config/samples/adb/autonomousdatabase_scale.yaml b/config/samples/adb/autonomousdatabase_scale.yaml index cd100675..ea53e94d 100644 --- a/config/samples/adb/autonomousdatabase_scale.yaml +++ b/config/samples/adb/autonomousdatabase_scale.yaml @@ -7,8 +7,9 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... # Your database's OPCU core count cpuCoreCount: 2 # Your database's storage size in TB diff --git a/config/samples/adb/autonomousdatabase_stop_start_terminate.yaml b/config/samples/adb/autonomousdatabase_stop_start_terminate.yaml index a41f8d0b..4a191dd6 100644 --- a/config/samples/adb/autonomousdatabase_stop_start_terminate.yaml +++ b/config/samples/adb/autonomousdatabase_stop_start_terminate.yaml @@ -7,10 +7,10 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + + action: Stop # Use the value "Start" to start the database details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - # Change the lifecycleState to "AVAILABLE" to start the database - lifecycleState: STOPPED + id: ocid1.autonomousdatabase... # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
ociConfig: configMapName: oci-cred diff --git a/config/samples/adb/autonomousdatabase_update_admin_password.yaml b/config/samples/adb/autonomousdatabase_update_admin_password.yaml index 67fd142d..be7aca69 100644 --- a/config/samples/adb/autonomousdatabase_update_admin_password.yaml +++ b/config/samples/adb/autonomousdatabase_update_admin_password.yaml @@ -7,8 +7,9 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... adminPassword: # Comment out k8sSecret and uncomment ociSecret if you pass the admin password using OCI Secret. k8sSecret: @@ -16,7 +17,7 @@ spec: name: new-admin-password # ociSecret: # # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . - # ocid: ocid1.vaultsecret... + # id: ocid1.vaultsecret... # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. ociConfig: configMapName: oci-cred diff --git a/config/samples/adb/autonomousdatabase_update_mtls.yaml b/config/samples/adb/autonomousdatabase_update_mtls.yaml index 93db0d8a..25eda529 100644 --- a/config/samples/adb/autonomousdatabase_update_mtls.yaml +++ b/config/samples/adb/autonomousdatabase_update_mtls.yaml @@ -7,12 +7,11 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - networkAccess: - # Set the patameter to false to allow both TLS and mutual TLS (mTLS) authentication. - # Avaiable when the networkAccessType is RESTRICTED or PRIVATE on shared Autnomous Database. - isMTLSConnectionRequired: false + id: ocid1.autonomousdatabase... + # Set the patameter to false to allow both TLS and mutual TLS (mTLS) authentication, or true to require mTLS connections and disallow TLS connections. 
+ isMTLSConnectionRequired: true # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. ociConfig: diff --git a/config/samples/adb/autonomousdatabase_update_network_access.yaml b/config/samples/adb/autonomousdatabase_update_network_access.yaml index f0e98806..7dd3fa0c 100644 --- a/config/samples/adb/autonomousdatabase_update_network_access.yaml +++ b/config/samples/adb/autonomousdatabase_update_network_access.yaml @@ -7,39 +7,38 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - networkAccess: - # Allow secure access from everywhere. - accessType: PUBLIC + id: ocid1.autonomousdatabase... + # # Allow secure access from everywhere. Uncomment one of the following field depends on your network access configuration. + # accessControlList: + # - + # privateEndpoint: "" - # # Uncomment this block to configure the network access type with the RESTRICTED option. - # # This option lets you restrict access by defining access control rules in an Access Control List (ACL). - # # By specifying an ACL, the database will be accessible from a whitelisted set of IP addresses, CIDR (Classless Inter-Domain Routing) blocks, or VCNs. - # # Use a semicolon (;) as a deliminator between the VCN-specific subnets or IPs. - # accessType: RESTRICTED - # accessControlList: - # - 1.1.1.1 - # - 1.1.0.0/16 - # - ocid1.vcn... - # - ocid1.vcn...;1.1.1.1 - # - ocid1.vcn...;1.1.0.0/16 + # # Uncomment this block to configure the network access type with the RESTRICTED option. + # # This option lets you restrict access by defining access control rules in an Access Control List (ACL). + # # By specifying an ACL, the database will be accessible from a whitelisted set of IP addresses, CIDR (Classless Inter-Domain Routing) blocks, or VCNs. 
+ # # Use a semicolon (;) as a deliminator between the VCN-specific subnets or IPs. + # accessControlList: + # - 1.1.1.1 + # - 1.1.0.0/16 + # - ocid1.vcn... + # - ocid1.vcn...;1.1.1.1 + # - ocid1.vcn...;1.1.0.0/16 - # # Uncomment this block to configure the network access type with the PRIVATE option. - # # This option assigns a private endpoint, private IP, and hostname to your database. - # # Specifying this option allows traffic only from the VCN you specify. - # # This allows you to define security rules, ingress/egress, at the Network Security Group (NSG) level and to control traffic to your Autonomous Database. - # accessType: PRIVATE - # privateEndpoint: - # subnetOCID: ocid1.subnet... - # nsgOCIDs: # Optional - # - ocid1.networksecuritygroup... + # # Uncomment this block to configure the network access type with the PRIVATE option. + # # This option assigns a private endpoint, private IP, and hostname to your database. + # # Specifying this option allows traffic only from the VCN you specify. + # # This allows you to define security rules, ingress/egress, at the Network Security Group (NSG) level and to control traffic to your Autonomous Database. + # privateEndpoint: + # subnetOCID: ocid1.subnet... + # nsgOCIDs: # Optional + # - ocid1.networksecuritygroup... - # # Uncomment this block to configure the network access of an dedicated Autonomous Database (ADB-D) with an access control list. - # isAccessControlEnabled: true - # accessControlList: - # - 1.1.1.1 - # - 1.1.0.0/16 + # # Uncomment this block to configure the network access of an dedicated Autonomous Database (ADB-D) with an access control list. + # accessControlList: + # - 1.1.1.1 + # - 1.1.0.0/16 # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
ociConfig: diff --git a/config/samples/adb/autonomousdatabase_wallet.yaml b/config/samples/adb/autonomousdatabase_wallet.yaml index 15fa6ca0..84136647 100644 --- a/config/samples/adb/autonomousdatabase_wallet.yaml +++ b/config/samples/adb/autonomousdatabase_wallet.yaml @@ -7,19 +7,20 @@ kind: AutonomousDatabase metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - wallet: - # Insert a name of the secret where you want the wallet to be stored. The default name is -instance-wallet. - name: instance-wallet - password: - # Comment out k8sSecret and uncomment ociSecret if you pass the admin password using OCI Secret. - k8sSecret: - # The Name of the K8s secret where you want to hold the password of the ADMIN account. - name: instance-wallet-password - # ociSecret: - # # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . - # ocid: ocid1.vaultsecret... + id: ocid1.autonomousdatabase... + wallet: + # Insert a name of the secret where you want the wallet to be stored. The default name is -instance-wallet. + name: instance-wallet + password: + # Comment out k8sSecret and uncomment ociSecret if you pass the admin password using OCI Secret. + k8sSecret: + # The Name of the K8s secret where you want to hold the password of the ADMIN account. + name: instance-wallet-password + # ociSecret: + # # The OCID of the OCI Secret that holds the password of the ADMIN account. It should start with ocid1.vaultsecret... . + # id: ocid1.vaultsecret... # Authorize the operator with API signing key pair. Comment out the ociConfig fields if your nodes are already authorized with instance principal. 
ociConfig: configMapName: oci-cred diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index ac5e158f..1a032832 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -29,5 +29,23 @@ resources: - adb/autonomousdatabase_bind.yaml - adb/autonomousdatabase_backup.yaml - adb/autonomousdatabase_restore.yaml + - acd/autonomouscontainerdatabase_create.yaml + - sidb/singleinstancedatabase.yaml + - sharding/shardingdatabase.yaml + - sharding/sharding_v1alpha1_provshard.yaml + - dbcs/database_v1alpha1_dbcssystem.yaml + - database_v1alpha1_dataguardbroker.yaml + - database_v1alpha1_shardingdatabase.yaml + - observability/v1alpha1/databaseobserver.yaml + - observability/v1/databaseobserver.yaml + - observability/v4/databaseobserver.yaml - acd/autonomouscontainerdatabase_restart_terminate.yaml + - database_v4_shardingdatabase.yaml + - database_v4_dbcssystem.yaml +- database_v4_lrest.yaml +- database_v4_lrpdb.yaml +- database_v4_ordssrvs.yaml +- database_v4_singleinstancedatabase.yaml +- database_v4_dataguardbroker.yaml +- database_v4_oraclerestdataservice.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/observability/v1/databaseobserver.yaml b/config/samples/observability/v1/databaseobserver.yaml new file mode 100644 index 00000000..82a7e89e --- /dev/null +++ b/config/samples/observability/v1/databaseobserver.yaml @@ -0,0 +1,81 @@ +# example +apiVersion: observability.oracle.com/v1 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - 
app.kubernetes.io/version + + sidecars: [ ] + sidecarVolumes: [ ] + + exporter: + deployment: + env: + TNS_ADMIN: /some/custom/path + ORACLE_HOME: /some/custom/path + DB_ROLE: SYSDBA + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + labels: + environment: dev + podTemplate: + labels: + environment: dev + + service: + labels: + environment: dev + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + + log: + filename: "alert.log" + path: "/log" + + volume: + name: volume + persistentVolumeClaim: + claimName: "my-pvc" + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + diff --git a/config/samples/observability/v1/databaseobserver_customization_fields.yaml b/config/samples/observability/v1/databaseobserver_customization_fields.yaml new file mode 100644 index 00000000..d88caec4 --- /dev/null +++ b/config/samples/observability/v1/databaseobserver_customization_fields.yaml @@ -0,0 +1,54 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallets + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: + - "--log.level=info" + commands: + - "/oracledb_exporter" + env: + TNS_ADMIN: /some/custom/path + labels: + environment: dev + podTemplate: + labels: + environment: dev + service: + ports: + - name: "metrics" + port: 9161 + targetPort: 9161 + labels: + environment: dev + + prometheus: + serviceMonitor: + endpoints: + - bearerTokenSecret: + key: '' + interval: 15s + port: metrics + labels: + release: prometheus + 
diff --git a/config/samples/observability/v1/databaseobserver_logs_promtail.yaml b/config/samples/observability/v1/databaseobserver_logs_promtail.yaml new file mode 100644 index 00000000..8130f487 --- /dev/null +++ b/config/samples/observability/v1/databaseobserver_logs_promtail.yaml @@ -0,0 +1,74 @@ +# example +apiVersion: observability.oracle.com/v1 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + sidecars: + - name: promtail + image: grafana/promtail + args: + - -config.file=/etc/promtail/promtail.yaml + volumeMounts: + - name: config + mountPath: /etc/promtail + - name: log-volume + mountPath: /log + + sidecarVolumes: + - name: config + configMap: + name: promtail-sidecar-config + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + log: + filename: "alert.log" + path: "/log" + + volume: + name: log-volume + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey diff --git a/config/samples/observability/v1alpha1/databaseobserver.yaml b/config/samples/observability/v1alpha1/databaseobserver.yaml new file mode 100644 index 00000000..24672d8b --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver.yaml @@ -0,0 +1,80 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: 
DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + sidecars: [ ] + sidecarVolumes: [ ] + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + env: + TNS_ADMIN: /some/custom/path + ORACLE_HOME: /some/custom/path + labels: + environment: dev + podTemplate: + labels: + environment: dev + + service: + labels: + environment: dev + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + + log: + filename: "alert.log" + path: "/log" + + volume: + name: volume + persistentVolumeClaim: + claimName: "my-pvc" + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + diff --git a/config/samples/observability/v1alpha1/databaseobserver_custom_config.yaml b/config/samples/observability/v1alpha1/databaseobserver_custom_config.yaml new file mode 100644 index 00000000..8e0d0623 --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver_custom_config.yaml @@ -0,0 +1,46 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + 
key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inherit_labels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + prometheus: + serviceMonitor: + labels: + release: prometheus + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" \ No newline at end of file diff --git a/config/samples/observability/v1alpha1/databaseobserver_logs_promtail.yaml b/config/samples/observability/v1alpha1/databaseobserver_logs_promtail.yaml new file mode 100644 index 00000000..28592cb0 --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver_logs_promtail.yaml @@ -0,0 +1,74 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: 1.5.1 +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + sidecars: + - name: promtail + image: grafana/promtail + args: + - -config.file=/etc/promtail/promtail.yaml + volumeMounts: + - name: config + mountPath: /etc/promtail + - name: log-volume + mountPath: /log + + sidecarVolumes: + - name: config + configMap: + name: promtail-sidecar-config + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + 
prometheus: + serviceMonitor: + labels: + release: prometheus + + log: + filename: "alert.log" + path: "/log" + + volume: + name: log-volume + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey diff --git a/config/samples/observability/v1alpha1/databaseobserver_minimal.yaml b/config/samples/observability/v1alpha1/databaseobserver_minimal.yaml new file mode 100644 index 00000000..74620ac7 --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver_minimal.yaml @@ -0,0 +1,26 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallets + + prometheus: + serviceMonitor: + labels: + release: prometheus \ No newline at end of file diff --git a/config/samples/observability/v1alpha1/databaseobserver_vault.yaml b/config/samples/observability/v1alpha1/databaseobserver_vault.yaml new file mode 100644 index 00000000..2fc3c9f0 --- /dev/null +++ b/config/samples/observability/v1alpha1/databaseobserver_vault.yaml @@ -0,0 +1,30 @@ +# example +apiVersion: observability.oracle.com/v1alpha1 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + vaultSecretName: sample_secret + vaultOCID: ocid1.vault.oc1.. 
+ + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + prometheus: + serviceMonitor: + labels: + release: prometheus + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/observability/v4/databaseobserver.yaml b/config/samples/observability/v4/databaseobserver.yaml new file mode 100644 index 00000000..f7b310f7 --- /dev/null +++ b/config/samples/observability/v4/databaseobserver.yaml @@ -0,0 +1,79 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: latest +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + sidecars: [ ] + sidecarVolumes: [ ] + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + env: + TNS_ADMIN: /some/custom/path + ORACLE_HOME: /some/custom/path + labels: + environment: dev + podTemplate: + labels: + environment: dev + + service: + labels: + environment: dev + + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + log: + filename: "alert.log" + path: "/log" + + volume: + name: volume + persistentVolumeClaim: + claimName: "my-pvc" + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + diff --git a/config/samples/observability/v4/databaseobserver_custom_config.yaml 
b/config/samples/observability/v4/databaseobserver_custom_config.yaml new file mode 100644 index 00000000..dd2e3da5 --- /dev/null +++ b/config/samples/observability/v4/databaseobserver_custom_config.yaml @@ -0,0 +1,46 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: latest +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inherit_labels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus \ No newline at end of file diff --git a/config/samples/observability/v4/databaseobserver_logs_promtail.yaml b/config/samples/observability/v4/databaseobserver_logs_promtail.yaml new file mode 100644 index 00000000..26a747a3 --- /dev/null +++ b/config/samples/observability/v4/databaseobserver_logs_promtail.yaml @@ -0,0 +1,76 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + app.kubernetes.io/version: latest +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inheritLabels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - 
app.kubernetes.io/version + + sidecars: + - name: promtail + image: grafana/promtail + args: + - -config.file=/etc/promtail/promtail.yaml + volumeMounts: + - name: config + mountPath: /etc/promtail + - name: log-volume + mountPath: /log + + sidecarVolumes: + - name: config + configMap: + name: promtail-sidecar-config + exporter: + deployment: + image: "container-registry.oracle.com/database/observability-exporter:1.5.1" + args: [ "--log.level=info" ] + commands: [ "/oracledb_exporter" ] + + configuration: + configMap: + key: "config.toml" + name: "devcm-oradevdb-config" + + prometheus: + serviceMonitor: + labels: + release: prometheus + + log: + filename: "alert.log" + path: "/log" + + volume: + name: log-volume + + replicas: 1 + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + + diff --git a/config/samples/observability/v4/databaseobserver_minimal.yaml b/config/samples/observability/v4/databaseobserver_minimal.yaml new file mode 100644 index 00000000..cc14fbea --- /dev/null +++ b/config/samples/observability/v4/databaseobserver_minimal.yaml @@ -0,0 +1,26 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + key: "password" + secret: db-secret + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallets + + prometheus: + serviceMonitor: + labels: + release: prometheus diff --git a/config/samples/observability/v4/databaseobserver_vault.yaml b/config/samples/observability/v4/databaseobserver_vault.yaml new file mode 100644 index 00000000..4f5845f6 --- /dev/null +++ b/config/samples/observability/v4/databaseobserver_vault.yaml @@ -0,0 +1,39 @@ +# example +apiVersion: observability.oracle.com/v4 +kind: DatabaseObserver +metadata: + name: obs-sample + labels: + app.kubernetes.io/name: observability-exporter + app.kubernetes.io/instance: obs-sample + 
app.kubernetes.io/version: latest +spec: + database: + dbUser: + key: "username" + secret: db-secret + + dbPassword: + vaultSecretName: sample_secret + vaultOCID: ocid1.vault.oc1.. + + dbConnectionString: + key: "connection" + secret: db-secret + + dbWallet: + secret: instance-wallet + + inherit_labels: + - app.kubernetes.io/name + - app.kubernetes.io/instance + - app.kubernetes.io/version + + prometheus: + serviceMonitor: + labels: + release: prometheus + + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey \ No newline at end of file diff --git a/config/samples/sidb/dataguardbroker.yaml b/config/samples/sidb/dataguardbroker.yaml index 5425afb7..644d2d40 100644 --- a/config/samples/sidb/dataguardbroker.yaml +++ b/config/samples/sidb/dataguardbroker.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DataguardBroker metadata: name: dataguardbroker-sample @@ -25,5 +25,9 @@ spec: ## Protection Mode for dg configuration . MaxAvailability or MaxPerformance protectionMode: MaxAvailability - ## Manual Switchover to this database to make it primary(if not already), requires target Database SID . + ## Specify the database SID to switchover thereby making it the primary. + ## Switchover is not supported when fastStartFailover is true. setAsPrimaryDatabase: "" + + ## Enable/disable Fast-Start Failover for the dataguard configuration. + fastStartFailover: false diff --git a/config/samples/sidb/oraclerestdataservice.yaml b/config/samples/sidb/oraclerestdataservice.yaml index 911a9b1e..77555f47 100644 --- a/config/samples/sidb/oraclerestdataservice.yaml +++ b/config/samples/sidb/oraclerestdataservice.yaml @@ -2,7 +2,7 @@ # Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: OracleRestDataService metadata: name: ords-sample @@ -26,24 +26,9 @@ spec: secretKey: keepSecret: true - ## To configure APEX with ORDS, specfiy the apexPassword secret details. Leave empty if Apex is not needed - ## This is a secret containing a common password for APEX_PUBLIC_USER, APEX_REST_PUBLIC_USER, APEX_LISTENER and Apex administrator (username: ADMIN) mapped to secretKey - ## This secret will be deleted after ORDS Installation unless keepSecret set to true. - ## This password should complete the following requirements: - ## 1. Contain at least 6 characters. - ## 2. Contain at least one numeric character (0123456789). - ## 3. Contain at least one punctuation character (!"#$%&()``*+,-/:;?_). - ## 4. Contain at least one uppercase alphabetic character. - - apexPassword: - secretName: - secretKey: - keepSecret: true - ## ORDS image details - ## Supported ORDS image is container-registry.oracle.com/database/ords:21.4.2-gh image: - pullFrom: container-registry.oracle.com/database/ords:21.4.2-gh + pullFrom: container-registry.oracle.com/database/ords-developer:latest pullSecrets: ## Dedicated persistent storage is optional. If not specified, ORDS will use persistent storage from .spec.databaseRef @@ -65,6 +50,8 @@ spec: #serviceAnnotations: # service.beta.kubernetes.io/oci-load-balancer-internal: "true" + ## Set this to true to enable MongoDB API + mongoDbApi: true ## Deploy only on nodes having required labels. Format label_name: label_value ## The same lables are applied to the created PVC diff --git a/config/samples/sidb/oraclerestdataservice_apex.yaml b/config/samples/sidb/oraclerestdataservice_apex.yaml deleted file mode 100644 index 6bdc9fb5..00000000 --- a/config/samples/sidb/oraclerestdataservice_apex.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. 
-# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# - -apiVersion: database.oracle.com/v1alpha1 -kind: OracleRestDataService -metadata: - name: ords-sample - namespace: default -spec: - - ## Database ref. This can be of kind SingleInstanceDatabase. - ## Make sure the source database has been created by applying singeinstancedatabase_express.yaml - databaseRef: "xedb-sample" - - ## Secret containing databaseRef password - adminPassword: - secretName: xedb-admin-secret - - ## Secret containing ORDS_PUBLIC_USER password - ordsPassword: - secretName: ords-secret - - ## To configure APEX with ORDS, specfiy the apexPassword secret details. Leave empty if Apex is not needed. - ## This is a secret containing a common password for APEX_PUBLIC_USER, APEX_REST_PUBLIC_USER, APEX_LISTENER and Apex administrator (username: ADMIN) - apexPassword: - secretName: apex-secret - - ## ORDS image details - image: - pullFrom: container-registry.oracle.com/database/ords:21.4.2-gh - - ## PDB Schemas to be ORDS Enabled. - ## Schema will be created (if not exists) with password as .spec.ordsPassword. - restEnableSchemas: - - schemaName: schema1 - enable: true - urlMapping: - - schemaName: schema2 - enable: true - urlMapping: myschema diff --git a/config/samples/sidb/oraclerestdataservice_create.yaml b/config/samples/sidb/oraclerestdataservice_create.yaml index 454abf37..e98ca018 100644 --- a/config/samples/sidb/oraclerestdataservice_create.yaml +++ b/config/samples/sidb/oraclerestdataservice_create.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: OracleRestDataService metadata: name: ords-sample @@ -24,7 +24,10 @@ spec: ## ORDS image details image: - pullFrom: container-registry.oracle.com/database/ords:21.4.2-gh + pullFrom: container-registry.oracle.com/database/ords-developer:latest + + ## Set this to true to enable MongoDB API + mongoDbApi: true ## PDB Schemas to be ORDS Enabled. ## Schema will be created (if not exists) with password as .spec.ordsPassword. @@ -34,4 +37,4 @@ spec: urlMapping: - schemaName: schema2 enable: true - urlMapping: myschema + urlMapping: myschema \ No newline at end of file diff --git a/config/samples/sidb/oraclerestdataservice_secrets.yaml b/config/samples/sidb/oraclerestdataservice_secrets.yaml index 9e587960..aebca546 100644 --- a/config/samples/sidb/oraclerestdataservice_secrets.yaml +++ b/config/samples/sidb/oraclerestdataservice_secrets.yaml @@ -13,21 +13,3 @@ type: Opaque stringData: ## Specify your ORDS password here oracle_pwd: - ---- - -## APEX password secret -apiVersion: v1 -kind: Secret -metadata: - name: apex-secret - namespace: default -type: Opaque -stringData: - ## Specify your APEX password here - ## This password should complete the following requirements: - ## 1. Contain at least 6 characters. - ## 2. Contain at least one numeric character (0123456789). - ## 3. Contain at least one punctuation character (!"#$%&()``*+,-/:;?_). - ## 4. Contain at least one uppercase alphabetic character. - oracle_pwd: diff --git a/config/samples/sidb/singleinstancedatabase.yaml b/config/samples/sidb/singleinstancedatabase.yaml index 4425acea..368762f5 100644 --- a/config/samples/sidb/singleinstancedatabase.yaml +++ b/config/samples/sidb/singleinstancedatabase.yaml @@ -2,7 +2,7 @@ # Copyright (c) 2023, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: name: sidb-sample @@ -17,14 +17,24 @@ spec: edition: enterprise ## Type of database. - ## Valid values for createAs are primary, clone or standby - ## Valid only for enterprise and standard editions + ## Valid values for createAs are primary, clone, standby or truecache createAs: primary + ## Specify true to convert this standby to a snapshot standby + ## Valid only if createAs is standby + convertToSnapshotStandby: false + ## Reference to a source primary database. - ## Valid only for createAs clone or standby + ## Valid only when createAs is clone, standby or truecache ## The name of a source primary database resource from the same namespace primaryDatabaseRef: "" + + ## Only valid when createAs is set to truecache + ## Accepts a semi colon separated map of `PRIMARY_PDB_SERIVCE_NAME:PRIMARY_SERVICE_NAME:TRUECACHE_SERVICE_NAME` + trueCacheServices: + # - "FREEPDB1:sales1:sales1_tc" + # - "FREEPDB1:sales2:sales2_tc" + # - "FREEPDB1:sales3:sales3_tc" ## Secret containing SIDB password mapped to secretKey. secretKey defaults to oracle_pwd ## Should refer to adminPassword of Source DB if createAs is clone or standby diff --git a/config/samples/sidb/singleinstancedatabase_clone.yaml b/config/samples/sidb/singleinstancedatabase_clone.yaml index f25484d9..438d4ea5 100644 --- a/config/samples/sidb/singleinstancedatabase_clone.yaml +++ b/config/samples/sidb/singleinstancedatabase_clone.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: name: sidb-sample-clone diff --git a/config/samples/sidb/singleinstancedatabase_create.yaml b/config/samples/sidb/singleinstancedatabase_create.yaml index d09d9c26..2a4e4bae 100644 --- a/config/samples/sidb/singleinstancedatabase_create.yaml +++ b/config/samples/sidb/singleinstancedatabase_create.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: # Creates base sidb-sample. Use singleinstancedatabase_clone.yaml for cloning @@ -33,7 +33,7 @@ spec: ## Database image details image: - pullFrom: container-registry.oracle.com/database/enterprise:latest + pullFrom: container-registry.oracle.com/database/enterprise_ru:19 pullSecrets: oracle-container-registry-secret ## size is the required minimum size of the persistent volume diff --git a/config/samples/sidb/singleinstancedatabase_express.yaml b/config/samples/sidb/singleinstancedatabase_express.yaml index 64f2e351..2cabbdaf 100644 --- a/config/samples/sidb/singleinstancedatabase_express.yaml +++ b/config/samples/sidb/singleinstancedatabase_express.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: name: xedb-sample diff --git a/config/samples/sidb/singleinstancedatabase_free-lite.yaml b/config/samples/sidb/singleinstancedatabase_free-lite.yaml new file mode 100644 index 00000000..93b3c4c9 --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_free-lite.yaml @@ -0,0 +1,35 @@ +# +# Copyright (c) 2024, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + name: freedb-lite-sample + namespace: default +spec: + + ## DB edition + edition: free + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: freedb-admin-secret + + ## Database image details + image: + ## Oracle Database Free Lite is only supported from DB version 23.2 onwards + pullFrom: container-registry.oracle.com/database/free:latest-lite + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 50Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. Should be 1 for free edition. + replicas: 1 \ No newline at end of file diff --git a/config/samples/sidb/singleinstancedatabase_free-truecache.yaml b/config/samples/sidb/singleinstancedatabase_free-truecache.yaml new file mode 100644 index 00000000..c2481f7c --- /dev/null +++ b/config/samples/sidb/singleinstancedatabase_free-truecache.yaml @@ -0,0 +1,48 @@ +# +# Copyright (c) 2024, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# + +apiVersion: database.oracle.com/v4 +kind: SingleInstanceDatabase +metadata: + name: truecache-sample + namespace: default +spec: + + ## DB edition + edition: free + + ## DB Type + createAs: truecache + + ## Reference to the source primary database. 
+ primaryDatabaseRef: "freedb-sample" + + ## Accepts a semi colon separated list of `PRIMARY_PDB_SERIVCE_NAME:PRIMARY_SERVICE_NAME:TRUECACHE_SERVICE_NAME` + trueCacheServices: + # - "FREEPDB1:sales1:sales1_tc" + # - "FREEPDB1:sales2:sales2_tc" + # - "FREEPDB1:sales3:sales3_tc" + + ## Secret containing SIDB password mapped to secretKey + adminPassword: + secretName: freedb-admin-secret + + ## Database image details + image: + ## Oracle True Cache is only supported with 23ai + pullFrom: container-registry.oracle.com/database/free:latest + + + ## size is the required minimum size of the persistent volume + ## storageClass is specified for automatic volume provisioning + ## accessMode can only accept one of ReadWriteOnce, ReadWriteMany + persistence: + size: 50Gi + ## oci-bv applies to OCI block volumes. Use "standard" storageClass for dynamic provisioning in Minikube. Update as appropriate for other cloud service providers + storageClass: "oci-bv" + accessMode: "ReadWriteOnce" + + ## Count of Database Pods. Should be 1 for free edition. + replicas: 1 diff --git a/config/samples/sidb/singleinstancedatabase_free.yaml b/config/samples/sidb/singleinstancedatabase_free.yaml index 6dd0aa39..6238e52e 100644 --- a/config/samples/sidb/singleinstancedatabase_free.yaml +++ b/config/samples/sidb/singleinstancedatabase_free.yaml @@ -3,16 +3,13 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: name: freedb-sample namespace: default spec: - - ## Use only alphanumeric characters for sid - sid: FREE - + ## DB edition edition: free diff --git a/config/samples/sidb/singleinstancedatabase_patch.yaml b/config/samples/sidb/singleinstancedatabase_patch.yaml index 9a211cdc..455bdc79 100644 --- a/config/samples/sidb/singleinstancedatabase_patch.yaml +++ b/config/samples/sidb/singleinstancedatabase_patch.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: # sidb-sample should have already been created using singleinstancedatabase_create.yaml diff --git a/config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml b/config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml index 5e4d0a4f..4eec988a 100644 --- a/config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml +++ b/config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: name: prebuiltdb-sample diff --git a/config/samples/sidb/singleinstancedatabase_standby.yaml b/config/samples/sidb/singleinstancedatabase_standby.yaml index 644438b4..d7ad4b23 100644 --- a/config/samples/sidb/singleinstancedatabase_standby.yaml +++ b/config/samples/sidb/singleinstancedatabase_standby.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: # Creates base standbydatabase-sample. 
Use singleinstancedatabase_clone.yaml for cloning @@ -27,7 +27,7 @@ spec: ## Database image details image: - pullFrom: container-registry.oracle.com/database/enterprise:latest + pullFrom: container-registry.oracle.com/database/enterprise_ru:19 pullSecrets: oracle-container-registry-secret ## size is the required minimum size of the persistent volume diff --git a/config/samples/sidb/singleinstancedatabase_tcps.yaml b/config/samples/sidb/singleinstancedatabase_tcps.yaml index 06389f96..d3e3100b 100644 --- a/config/samples/sidb/singleinstancedatabase_tcps.yaml +++ b/config/samples/sidb/singleinstancedatabase_tcps.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: # Creates base sidb-sample. Use singleinstancedatabase_clone.yaml for cloning diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index c150867f..3a0f15ec 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -1,9 +1,7 @@ - --- apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - creationTimestamp: null name: mutating-webhook-configuration webhooks: - admissionReviewVersions: @@ -12,50 +10,153 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-database-oracle-com-v1alpha1-autonomousdatabase + path: /mutate-database-oracle-com-v4-autonomousdatabasebackup failurePolicy: Fail - name: mautonomousdatabase.kb.io + name: mautonomousdatabasebackupv4.kb.io rules: - apiGroups: - database.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE resources: - - autonomousdatabases + - autonomousdatabasebackups sideEffects: None - admissionReviewVersions: - v1 + - v1beta1 clientConfig: service: name: webhook-service namespace: system - path: /mutate-database-oracle-com-v1alpha1-autonomousdatabasebackup 
+ path: /mutate-database-oracle-com-v4-cdb failurePolicy: Fail - name: mautonomousdatabasebackup.kb.io + name: mcdb.kb.io rules: - apiGroups: - database.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE resources: - - autonomousdatabasebackups + - cdbs sideEffects: None - admissionReviewVersions: - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-dbcssystem + failurePolicy: Fail + name: mdbcssystemv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - dbcssystems + sideEffects: None +- admissionReviewVersions: + - v4 - v1beta1 clientConfig: service: name: webhook-service namespace: system - path: /mutate-database-oracle-com-v1alpha1-cdb + path: /mutate-database-oracle-com-v4-lrest failurePolicy: Fail - name: mcdb.kb.io + name: mlrest.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrests + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-lrpdb + failurePolicy: Fail + name: mlrpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrpdbs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-pdb + failurePolicy: Fail + name: mpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-shardingdatabase + failurePolicy: Fail + name: mshardingdatabasev4.kb.io + rules: + 
- apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v1alpha1-autonomousdatabasebackup + failurePolicy: Fail + name: mautonomousdatabasebackupv1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -65,7 +166,7 @@ webhooks: - CREATE - UPDATE resources: - - cdbs + - autonomousdatabasebackups sideEffects: None - admissionReviewVersions: - v1 @@ -88,6 +189,26 @@ webhooks: resources: - dataguardbrokers sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-dbcssystem + failurePolicy: Fail + name: mdbcssystemv1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - dbcssystems + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -111,14 +232,13 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: webhook-service namespace: system - path: /mutate-database-oracle-com-v1alpha1-pdb + path: /mutate-database-oracle-com-v1alpha1-shardingdatabase failurePolicy: Fail - name: mpdb.kb.io + name: mshardingdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -128,7 +248,7 @@ webhooks: - CREATE - UPDATE resources: - - pdbs + - shardingdatabases sideEffects: None - admissionReviewVersions: - v1 @@ -171,6 +291,26 @@ webhooks: resources: - singleinstancedatabases sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-observability-oracle-com-v1-databaseobserver + failurePolicy: Fail + name: mdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v1 + operations: + 
- CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None - admissionReviewVersions: - v1 clientConfig: @@ -191,12 +331,30 @@ webhooks: resources: - databaseobservers sideEffects: None - +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-observability-oracle-com-v4-databaseobserver + failurePolicy: Fail + name: mdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - creationTimestamp: null name: validating-webhook-configuration webhooks: - admissionReviewVersions: @@ -205,14 +363,14 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-database-oracle-com-v1alpha1-autonomouscontainerdatabase + path: /validate-database-oracle-com-v4-autonomouscontainerdatabase failurePolicy: Fail - name: vautonomouscontainerdatabase.kb.io + name: vautonomouscontainerdatabasev4.kb.io rules: - apiGroups: - database.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE @@ -225,9 +383,154 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-database-oracle-com-v1alpha1-autonomousdatabase + path: /validate-database-oracle-com-v4-autonomousdatabasebackup + failurePolicy: Fail + name: vautonomousdatabasebackupv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-autonomousdatabaserestore + failurePolicy: Fail + name: vautonomousdatabaserestorev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - 
CREATE + - UPDATE + resources: + - autonomousdatabaserestores + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: vcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-lrest + failurePolicy: Fail + name: vlrest.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrests + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-lrpdb + failurePolicy: Fail + name: vlrpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrpdbs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-pdb + failurePolicy: Fail + name: vpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-shardingdatabase failurePolicy: Fail - name: vautonomousdatabase.kb.io + name: vshardingdatabasev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: 
webhook-service + namespace: system + path: /validate-database-oracle-com-v1alpha1-autonomouscontainerdatabase + failurePolicy: Fail + name: vautonomouscontainerdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -237,7 +540,7 @@ webhooks: - CREATE - UPDATE resources: - - autonomousdatabases + - autonomouscontainerdatabases sideEffects: None - admissionReviewVersions: - v1 @@ -247,7 +550,7 @@ webhooks: namespace: system path: /validate-database-oracle-com-v1alpha1-autonomousdatabasebackup failurePolicy: Fail - name: vautonomousdatabasebackup.kb.io + name: vautonomousdatabasebackupv1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -267,7 +570,7 @@ webhooks: namespace: system path: /validate-database-oracle-com-v1alpha1-autonomousdatabaserestore failurePolicy: Fail - name: vautonomousdatabaserestore.kb.io + name: vautonomousdatabaserestorev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -281,14 +584,13 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: webhook-service namespace: system - path: /validate-database-oracle-com-v1alpha1-cdb + path: /validate-database-oracle-com-v1alpha1-autonomousdatabase failurePolicy: Fail - name: vcdb.kb.io + name: vautonomousdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -298,7 +600,7 @@ webhooks: - CREATE - UPDATE resources: - - cdbs + - autonomousdatabases sideEffects: None - admissionReviewVersions: - v1 @@ -344,14 +646,13 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: webhook-service namespace: system - path: /validate-database-oracle-com-v1alpha1-pdb + path: /validate-database-oracle-com-v1alpha1-shardingdatabase failurePolicy: Fail - name: vpdb.kb.io + name: vshardingdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -360,8 +661,9 @@ webhooks: operations: - CREATE - UPDATE + - DELETE resources: - - pdbs + - shardingdatabases sideEffects: None - 
admissionReviewVersions: - v1 @@ -406,6 +708,26 @@ webhooks: resources: - singleinstancedatabases sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-observability-oracle-com-v1-databaseobserver + failurePolicy: Fail + name: vdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None - admissionReviewVersions: - v1 clientConfig: @@ -426,3 +748,23 @@ webhooks: resources: - databaseobservers sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-observability-oracle-com-v4-databaseobserver + failurePolicy: Fail + name: vdatabaseobserver.kb.io + rules: + - apiGroups: + - observability.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - databaseobservers + sideEffects: None diff --git a/controllers/database/autonomouscontainerdatabase_controller.go b/controllers/database/autonomouscontainerdatabase_controller.go index 3845a03f..73830eee 100644 --- a/controllers/database/autonomouscontainerdatabase_controller.go +++ b/controllers/database/autonomouscontainerdatabase_controller.go @@ -58,7 +58,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/oracle/oracle-database-operator/commons/annotations" "github.com/oracle/oracle-database-operator/commons/k8s" "github.com/oracle/oracle-database-operator/commons/oci" @@ -77,7 +77,7 @@ type AutonomousContainerDatabaseReconciler struct { // SetupWithManager sets up the controller with the Manager. 
func (r *AutonomousContainerDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&dbv1alpha1.AutonomousContainerDatabase{}). + For(&dbv4.AutonomousContainerDatabase{}). WithEventFilter(r.eventFilterPredicate()). WithOptions(controller.Options{MaxConcurrentReconciles: 5}). Complete(r) @@ -86,13 +86,13 @@ func (r *AutonomousContainerDatabaseReconciler) SetupWithManager(mgr ctrl.Manage func (r *AutonomousContainerDatabaseReconciler) eventFilterPredicate() predicate.Predicate { pred := predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { - desiredACD, acdOk := e.ObjectNew.(*dbv1alpha1.AutonomousContainerDatabase) + desiredACD, acdOk := e.ObjectNew.(*dbv4.AutonomousContainerDatabase) if acdOk { - oldACD := e.ObjectOld.(*dbv1alpha1.AutonomousContainerDatabase) + oldACD := e.ObjectOld.(*dbv4.AutonomousContainerDatabase) if !reflect.DeepEqual(oldACD.Status, desiredACD.Status) || - (controllerutil.ContainsFinalizer(oldACD, dbv1alpha1.LastSuccessfulSpec) != controllerutil.ContainsFinalizer(desiredACD, dbv1alpha1.LastSuccessfulSpec)) || - (controllerutil.ContainsFinalizer(oldACD, dbv1alpha1.ACDFinalizer) != controllerutil.ContainsFinalizer(desiredACD, dbv1alpha1.ACDFinalizer)) { + (controllerutil.ContainsFinalizer(oldACD, dbv4.LastSuccessfulSpec) != controllerutil.ContainsFinalizer(desiredACD, dbv4.LastSuccessfulSpec)) || + (controllerutil.ContainsFinalizer(oldACD, dbv4.ACDFinalizer) != controllerutil.ContainsFinalizer(desiredACD, dbv4.ACDFinalizer)) { // Don't enqueue if the status, lastSucSpec, or the finalizler changes return false } @@ -103,7 +103,7 @@ func (r *AutonomousContainerDatabaseReconciler) eventFilterPredicate() predicate }, DeleteFunc: func(e event.DeleteEvent) bool { // Do not trigger reconciliation when the object is deleted from the cluster. 
- _, acdOk := e.Object.(*dbv1alpha1.AutonomousContainerDatabase) + _, acdOk := e.Object.(*dbv4.AutonomousContainerDatabase) return !acdOk }, } @@ -124,10 +124,10 @@ func (r *AutonomousContainerDatabaseReconciler) Reconcile(ctx context.Context, r logger := r.Log.WithValues("Namespace/Name", req.NamespacedName) var err error - var ociACD *dbv1alpha1.AutonomousContainerDatabase + var ociACD *dbv4.AutonomousContainerDatabase // Get the autonomousdatabase instance from the cluster - acd := &dbv1alpha1.AutonomousContainerDatabase{} + acd := &dbv4.AutonomousContainerDatabase{} if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, acd); err != nil { // Ignore not-found errors, since they can't be fixed by an immediate requeue. // No need to change the since we don't know if we obtain the object. @@ -159,7 +159,7 @@ func (r *AutonomousContainerDatabaseReconciler) Reconcile(ctx context.Context, r return r.manageError(logger, acd, err) } - ociACD = &dbv1alpha1.AutonomousContainerDatabase{} + ociACD = &dbv4.AutonomousContainerDatabase{} ociACD.UpdateFromOCIACD(resp.AutonomousContainerDatabase) } @@ -218,7 +218,7 @@ func (r *AutonomousContainerDatabaseReconciler) Reconcile(ctx context.Context, r return r.manageError(logger, acd, err) } - if dbv1alpha1.IsACDIntermediateState(acd.Status.LifecycleState) { + if dbv4.IsACDIntermediateState(acd.Status.LifecycleState) { logger.WithName("IsIntermediateState").Info("Current lifecycleState is " + string(acd.Status.LifecycleState) + "; reconcile queued") return requeueResult, nil } @@ -232,16 +232,16 @@ func (r *AutonomousContainerDatabaseReconciler) Reconcile(ctx context.Context, r return emptyResult, nil } -func (r *AutonomousContainerDatabaseReconciler) setupOCIClients(logger logr.Logger, acd *dbv1alpha1.AutonomousContainerDatabase) error { +func (r *AutonomousContainerDatabaseReconciler) setupOCIClients(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase) error { var err error - authData := oci.APIKeyAuth{ + authData := 
oci.ApiKeyAuth{ ConfigMapName: acd.Spec.OCIConfig.ConfigMapName, SecretName: acd.Spec.OCIConfig.SecretName, Namespace: acd.GetNamespace(), } - provider, err := oci.GetOCIProvider(r.KubeClient, authData) + provider, err := oci.GetOciProvider(r.KubeClient, authData) if err != nil { return err } @@ -254,7 +254,7 @@ func (r *AutonomousContainerDatabaseReconciler) setupOCIClients(logger logr.Logg return nil } -func (r *AutonomousContainerDatabaseReconciler) manageError(logger logr.Logger, acd *dbv1alpha1.AutonomousContainerDatabase, issue error) (ctrl.Result, error) { +func (r *AutonomousContainerDatabaseReconciler) manageError(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase, issue error) (ctrl.Result, error) { l := logger.WithName("manageError") // Has synced at least once @@ -290,7 +290,7 @@ func (r *AutonomousContainerDatabaseReconciler) manageError(logger logr.Logger, } // validateLifecycleState gets and validates the current lifecycleState -func (r *AutonomousContainerDatabaseReconciler) validateLifecycleState(logger logr.Logger, acd *dbv1alpha1.AutonomousContainerDatabase, ociACD *dbv1alpha1.AutonomousContainerDatabase) (needsRequeue bool, err error) { +func (r *AutonomousContainerDatabaseReconciler) validateLifecycleState(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase, ociACD *dbv4.AutonomousContainerDatabase) (needsRequeue bool, err error) { if ociACD == nil { return false, nil } @@ -313,7 +313,7 @@ func (r *AutonomousContainerDatabaseReconciler) validateLifecycleState(logger lo return false, err } - if dbv1alpha1.IsACDIntermediateState(ociACD.Status.LifecycleState) { + if dbv4.IsACDIntermediateState(ociACD.Status.LifecycleState) { l.Info("LifecycleState is " + string(acd.Status.LifecycleState) + "; reconcile queued") return true, nil } @@ -321,7 +321,7 @@ func (r *AutonomousContainerDatabaseReconciler) validateLifecycleState(logger lo return false, nil } -func (r *AutonomousContainerDatabaseReconciler) validateCleanup(logger logr.Logger, 
acd *dbv1alpha1.AutonomousContainerDatabase) (exitReconcile bool, err error) { +func (r *AutonomousContainerDatabaseReconciler) validateCleanup(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase) (exitReconcile bool, err error) { l := logger.WithName("validateCleanup") isACDToBeDeleted := acd.GetDeletionTimestamp() != nil @@ -330,7 +330,7 @@ func (r *AutonomousContainerDatabaseReconciler) validateCleanup(logger logr.Logg return false, nil } - if controllerutil.ContainsFinalizer(acd, dbv1alpha1.ACDFinalizer) { + if controllerutil.ContainsFinalizer(acd, dbv4.ACDFinalizer) { if acd.Status.LifecycleState == database.AutonomousContainerDatabaseLifecycleStateTerminating { l.Info("Resource is already in TERMINATING state") // Delete in progress, continue with the reconcile logic @@ -341,7 +341,7 @@ func (r *AutonomousContainerDatabaseReconciler) validateCleanup(logger logr.Logg // The acd has been deleted. Remove the finalizer and exit the reconcile. // Once all finalizers have been removed, the object will be deleted. l.Info("Resource is already in TERMINATED state; remove the finalizer") - if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, acd, dbv1alpha1.ACDFinalizer); err != nil { + if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, acd, dbv4.ACDFinalizer); err != nil { return false, err } return true, nil @@ -350,17 +350,17 @@ func (r *AutonomousContainerDatabaseReconciler) validateCleanup(logger logr.Logg if acd.Spec.AutonomousContainerDatabaseOCID == nil { l.Info("Missing AutonomousContainerDatabaseOCID to terminate Autonomous Container Database; remove the finalizer anyway", "Name", acd.Name, "Namespace", acd.Namespace) // Remove finalizer anyway. 
- if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, acd, dbv1alpha1.ACDFinalizer); err != nil { + if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, acd, dbv4.ACDFinalizer); err != nil { return false, err } return true, nil } - if acd.Spec.Action != dbv1alpha1.AcdActionTerminate { + if acd.Spec.Action != dbv4.AcdActionTerminate { // Run finalization logic for finalizer. If the finalization logic fails, don't remove the finalizer so // that we can retry during the next reconciliation. l.Info("Terminating Autonomous Container Database") - acd.Spec.Action = dbv1alpha1.AcdActionTerminate + acd.Spec.Action = dbv4.AcdActionTerminate if err := r.KubeClient.Update(context.TODO(), acd); err != nil { return false, err } @@ -376,15 +376,15 @@ func (r *AutonomousContainerDatabaseReconciler) validateCleanup(logger logr.Logg return true, nil } -func (r *AutonomousContainerDatabaseReconciler) validateFinalizer(acd *dbv1alpha1.AutonomousContainerDatabase) error { +func (r *AutonomousContainerDatabaseReconciler) validateFinalizer(acd *dbv4.AutonomousContainerDatabase) error { // Delete is not schduled. 
Update the finalizer for this CR if hardLink is present if acd.Spec.HardLink != nil { - if *acd.Spec.HardLink && !controllerutil.ContainsFinalizer(acd, dbv1alpha1.ACDFinalizer) { - if err := k8s.AddFinalizerAndPatch(r.KubeClient, acd, dbv1alpha1.ACDFinalizer); err != nil { + if *acd.Spec.HardLink && !controllerutil.ContainsFinalizer(acd, dbv4.ACDFinalizer) { + if err := k8s.AddFinalizerAndPatch(r.KubeClient, acd, dbv4.ACDFinalizer); err != nil { return err } - } else if !*acd.Spec.HardLink && controllerutil.ContainsFinalizer(acd, dbv1alpha1.ACDFinalizer) { - if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, acd, dbv1alpha1.ACDFinalizer); err != nil { + } else if !*acd.Spec.HardLink && controllerutil.ContainsFinalizer(acd, dbv4.ACDFinalizer) { + if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, acd, dbv4.ACDFinalizer); err != nil { return err } } @@ -395,8 +395,8 @@ func (r *AutonomousContainerDatabaseReconciler) validateFinalizer(acd *dbv1alpha func (r *AutonomousContainerDatabaseReconciler) validateOperation( logger logr.Logger, - acd *dbv1alpha1.AutonomousContainerDatabase, - ociACD *dbv1alpha1.AutonomousContainerDatabase) (exitReconcile bool, result ctrl.Result, err error) { + acd *dbv4.AutonomousContainerDatabase, + ociACD *dbv4.AutonomousContainerDatabase) (exitReconcile bool, result ctrl.Result, err error) { l := logger.WithName("validateOperation") @@ -511,7 +511,7 @@ func (r *AutonomousContainerDatabaseReconciler) validateOperation( } } -func (r *AutonomousContainerDatabaseReconciler) updateCR(acd *dbv1alpha1.AutonomousContainerDatabase) error { +func (r *AutonomousContainerDatabaseReconciler) updateCR(acd *dbv4.AutonomousContainerDatabase) error { // Update the lastSucSpec if err := acd.UpdateLastSuccessfulSpec(); err != nil { return err @@ -523,14 +523,14 @@ func (r *AutonomousContainerDatabaseReconciler) updateCR(acd *dbv1alpha1.Autonom return nil } -func (r *AutonomousContainerDatabaseReconciler) patchLastSuccessfulSpec(acd 
*dbv1alpha1.AutonomousContainerDatabase) error { +func (r *AutonomousContainerDatabaseReconciler) patchLastSuccessfulSpec(acd *dbv4.AutonomousContainerDatabase) error { specBytes, err := json.Marshal(acd.Spec) if err != nil { return err } anns := map[string]string{ - dbv1alpha1.LastSuccessfulSpec: string(specBytes), + dbv4.LastSuccessfulSpec: string(specBytes), } annotations.PatchAnnotations(r.KubeClient, acd, anns) @@ -538,7 +538,7 @@ func (r *AutonomousContainerDatabaseReconciler) patchLastSuccessfulSpec(acd *dbv return nil } -func (r *AutonomousContainerDatabaseReconciler) createACD(logger logr.Logger, acd *dbv1alpha1.AutonomousContainerDatabase) error { +func (r *AutonomousContainerDatabaseReconciler) createACD(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase) error { logger.WithName("createACD").Info("Sending CreateAutonomousContainerDatabase request to OCI") resp, err := r.dbService.CreateAutonomousContainerDatabase(acd) @@ -551,7 +551,7 @@ func (r *AutonomousContainerDatabaseReconciler) createACD(logger logr.Logger, ac return nil } -func (r *AutonomousContainerDatabaseReconciler) getACD(logger logr.Logger, acd *dbv1alpha1.AutonomousContainerDatabase) (bool, error) { +func (r *AutonomousContainerDatabaseReconciler) getACD(logger logr.Logger, acd *dbv4.AutonomousContainerDatabase) (bool, error) { if acd == nil { return false, errors.New("AutonomousContainerDatabase OCID is missing") } @@ -573,10 +573,10 @@ func (r *AutonomousContainerDatabaseReconciler) getACD(logger logr.Logger, acd * // The AutonomousContainerDatabase is updated with the returned object from the OCI requests. 
func (r *AutonomousContainerDatabaseReconciler) updateACD( logger logr.Logger, - acd *dbv1alpha1.AutonomousContainerDatabase, - difACD *dbv1alpha1.AutonomousContainerDatabase) (ociReqSent bool, specChanged bool, err error) { + acd *dbv4.AutonomousContainerDatabase, + difACD *dbv4.AutonomousContainerDatabase) (ociReqSent bool, specChanged bool, err error) { - validations := []func(logr.Logger, *dbv1alpha1.AutonomousContainerDatabase, *dbv1alpha1.AutonomousContainerDatabase) (bool, bool, error){ + validations := []func(logr.Logger, *dbv4.AutonomousContainerDatabase, *dbv4.AutonomousContainerDatabase) (bool, bool, error){ r.validateGeneralFields, r.validateDesiredLifecycleState, } @@ -597,8 +597,8 @@ func (r *AutonomousContainerDatabaseReconciler) updateACD( func (r *AutonomousContainerDatabaseReconciler) validateGeneralFields( logger logr.Logger, - acd *dbv1alpha1.AutonomousContainerDatabase, - difACD *dbv1alpha1.AutonomousContainerDatabase) (sent bool, requeue bool, err error) { + acd *dbv4.AutonomousContainerDatabase, + difACD *dbv4.AutonomousContainerDatabase) (sent bool, requeue bool, err error) { if difACD.Spec.DisplayName == nil && difACD.Spec.PatchModel == "" && @@ -620,17 +620,17 @@ func (r *AutonomousContainerDatabaseReconciler) validateGeneralFields( func (r *AutonomousContainerDatabaseReconciler) validateDesiredLifecycleState( logger logr.Logger, - acd *dbv1alpha1.AutonomousContainerDatabase, - difACD *dbv1alpha1.AutonomousContainerDatabase) (sent bool, specChanged bool, err error) { + acd *dbv4.AutonomousContainerDatabase, + difACD *dbv4.AutonomousContainerDatabase) (sent bool, specChanged bool, err error) { - if difACD.Spec.Action == dbv1alpha1.AcdActionBlank { + if difACD.Spec.Action == dbv4.AcdActionBlank { return false, false, nil } l := logger.WithName("validateDesiredLifecycleState") switch difACD.Spec.Action { - case dbv1alpha1.AcdActionRestart: + case dbv4.AcdActionRestart: l.Info("Sending RestartAutonomousContainerDatabase request to OCI") resp, 
err := r.dbService.RestartAutonomousContainerDatabase(*acd.Spec.AutonomousContainerDatabaseOCID) @@ -639,7 +639,7 @@ func (r *AutonomousContainerDatabaseReconciler) validateDesiredLifecycleState( } acd.Status.LifecycleState = resp.LifecycleState - case dbv1alpha1.AcdActionTerminate: + case dbv4.AcdActionTerminate: l.Info("Sending TerminateAutonomousContainerDatabase request to OCI") _, err := r.dbService.TerminateAutonomousContainerDatabase(*acd.Spec.AutonomousContainerDatabaseOCID) @@ -652,7 +652,7 @@ func (r *AutonomousContainerDatabaseReconciler) validateDesiredLifecycleState( return false, false, errors.New("unknown lifecycleState") } - acd.Spec.Action = dbv1alpha1.AcdActionBlank + acd.Spec.Action = dbv4.AcdActionBlank return true, true, nil } diff --git a/controllers/database/autonomousdatabase_controller.go b/controllers/database/autonomousdatabase_controller.go index bf56bfe0..37ae1b14 100644 --- a/controllers/database/autonomousdatabase_controller.go +++ b/controllers/database/autonomousdatabase_controller.go @@ -40,7 +40,6 @@ package controllers import ( "context" - "encoding/json" "errors" "fmt" "reflect" @@ -49,7 +48,6 @@ import ( "time" "github.com/go-logr/logr" - "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/database" apiErrors "k8s.io/apimachinery/pkg/api/errors" @@ -68,12 +66,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/commons/annotations" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" "github.com/oracle/oracle-database-operator/commons/k8s" "github.com/oracle/oracle-database-operator/commons/oci" ) +// name of our custom finalizer +const ADB_FINALIZER = "database.oracle.com/adb-finalizer" + var requeueResult ctrl.Result = ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second} var emptyResult ctrl.Result = 
ctrl.Result{} @@ -90,13 +90,9 @@ type AutonomousDatabaseReconciler struct { // SetupWithManager function func (r *AutonomousDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&dbv1alpha1.AutonomousDatabase{}). - Watches( - &dbv1alpha1.AutonomousDatabaseBackup{}, - handler.EnqueueRequestsFromMapFunc(r.enqueueMapFn), - ). + For(&dbv4.AutonomousDatabase{}). Watches( - &dbv1alpha1.AutonomousDatabaseRestore{}, + &dbv4.AutonomousDatabaseRestore{}, handler.EnqueueRequestsFromMapFunc(r.enqueueMapFn), ). WithEventFilter(predicate.And(r.eventFilterPredicate(), r.watchPredicate())). @@ -121,22 +117,15 @@ func (r *AutonomousDatabaseReconciler) enqueueMapFn(ctx context.Context, o clien func (r *AutonomousDatabaseReconciler) watchPredicate() predicate.Predicate { return predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { - _, backupOk := e.Object.(*dbv1alpha1.AutonomousDatabaseBackup) - _, restoreOk := e.Object.(*dbv1alpha1.AutonomousDatabaseRestore) + _, restoreOk := e.Object.(*dbv4.AutonomousDatabaseRestore) // Don't enqueue if the event is from Backup or Restore - return !(backupOk || restoreOk) + return !restoreOk }, UpdateFunc: func(e event.UpdateEvent) bool { // Enqueue the update event only when the status changes the first time - desiredBackup, backupOk := e.ObjectNew.(*dbv1alpha1.AutonomousDatabaseBackup) - if backupOk { - oldBackup := e.ObjectOld.(*dbv1alpha1.AutonomousDatabaseBackup) - return oldBackup.Status.LifecycleState == "" && desiredBackup.Status.LifecycleState != "" - } - - desiredRestore, restoreOk := e.ObjectNew.(*dbv1alpha1.AutonomousDatabaseRestore) + desiredRestore, restoreOk := e.ObjectNew.(*dbv4.AutonomousDatabaseRestore) if restoreOk { - oldRestore := e.ObjectOld.(*dbv1alpha1.AutonomousDatabaseRestore) + oldRestore := e.ObjectOld.(*dbv4.AutonomousDatabaseRestore) return oldRestore.Status.Status == "" && desiredRestore.Status.Status != "" } @@ -150,21 +139,17 @@ func (r 
*AutonomousDatabaseReconciler) eventFilterPredicate() predicate.Predicat return predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { // source object can be AutonomousDatabase, AutonomousDatabaseBackup, or AutonomousDatabaseRestore - desiredADB, adbOk := e.ObjectNew.(*dbv1alpha1.AutonomousDatabase) + desiredAdb, adbOk := e.ObjectNew.(*dbv4.AutonomousDatabase) if adbOk { - oldADB := e.ObjectOld.(*dbv1alpha1.AutonomousDatabase) - - specChanged := !reflect.DeepEqual(oldADB.Spec, desiredADB.Spec) - statusChanged := !reflect.DeepEqual(oldADB.Status, desiredADB.Status) + oldAdb := e.ObjectOld.(*dbv4.AutonomousDatabase) - oldLastSucSpec := oldADB.GetAnnotations()[dbv1alpha1.LastSuccessfulSpec] - desiredLastSucSpec := desiredADB.GetAnnotations()[dbv1alpha1.LastSuccessfulSpec] - lastSucSpecChanged := oldLastSucSpec != desiredLastSucSpec + specChanged := !reflect.DeepEqual(oldAdb.Spec, desiredAdb.Spec) + statusChanged := !reflect.DeepEqual(oldAdb.Status, desiredAdb.Status) - if (!specChanged && statusChanged) || lastSucSpecChanged || - (controllerutil.ContainsFinalizer(oldADB, dbv1alpha1.ADB_FINALIZER) != controllerutil.ContainsFinalizer(desiredADB, dbv1alpha1.ADB_FINALIZER)) { + if (!specChanged && statusChanged) || + (controllerutil.ContainsFinalizer(oldAdb, ADB_FINALIZER) != controllerutil.ContainsFinalizer(desiredAdb, ADB_FINALIZER)) { // Don't enqueue in the folowing condition: - // 1. only status changes 2. lastSucSpec changes 3. ADB_FINALIZER changes + // 1. only status changes 2. ADB_FINALIZER changes return false } @@ -174,7 +159,7 @@ func (r *AutonomousDatabaseReconciler) eventFilterPredicate() predicate.Predicat }, DeleteFunc: func(e event.DeleteEvent) bool { // Do not trigger reconciliation when the object is deleted from the cluster. 
- _, adbOk := e.Object.(*dbv1alpha1.AutonomousDatabase) + _, adbOk := e.Object.(*dbv4.AutonomousDatabase) return !adbOk }, } @@ -189,158 +174,205 @@ func (r *AutonomousDatabaseReconciler) eventFilterPredicate() predicate.Predicat // +kubebuilder:rbac:groups="",resources=configmaps;secrets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch -// Reconcile is the funtion that the operator calls every time when the reconciliation loop is triggered. -// It go to the beggining of the reconcile if an error is returned. We won't return a error if it is related -// to OCI, because the issues cannot be solved by re-run the reconcile. func (r *AutonomousDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { logger := r.Log.WithValues("Namespace/Name", req.NamespacedName) var err error - var ociADB *dbv1alpha1.AutonomousDatabase + // Indicates whether spec has been changed at the end of the reconcile. + var specChanged bool = false // Get the autonomousdatabase instance from the cluster - desiredADB := &dbv1alpha1.AutonomousDatabase{} - if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, desiredADB); err != nil { + desiredAdb := &dbv4.AutonomousDatabase{} + if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, desiredAdb); err != nil { // Ignore not-found errors, since they can't be fixed by an immediate requeue. - // No need to change the since we don't know if we obtain the object. 
if apiErrors.IsNotFound(err) { return emptyResult, nil } - // Failed to get ADB, so we don't need to update the status return emptyResult, err } /****************************************************************** * Get OCI database client ******************************************************************/ - if err := r.setupOCIClients(logger, desiredADB); err != nil { - logger.Error(err, "Fail to setup OCI clients") - - return r.manageError(logger.WithName("setupOCIClients"), desiredADB, err) + if err := r.setupOCIClients(logger, desiredAdb); err != nil { + return r.manageError( + logger.WithName("setupOCIClients"), + desiredAdb, + fmt.Errorf("Failed to get OCI Database Client: %w", err)) } logger.Info("OCI clients configured succesfully") /****************************************************************** - * Cleanup the resource if the resource is to be deleted. - * Deletion timestamp will be added to a object before it is deleted. - * Kubernetes server calls the clean up function if a finalizer exitsts, and won't delete the real object until - * all the finalizers are removed from the object metadata. - * Refer to this page for more details of using finalizers: https://kubernetes.io/blog/2022/05/14/using-finalizers-to-control-deletion/ + * Fill the empty fields in the local resource at the beginning of + * the reconciliation. ******************************************************************/ - exitReconcile, err := r.validateCleanup(logger, desiredADB) - if err != nil { - return r.manageError(logger.WithName("validateCleanup"), desiredADB, err) - } - - if exitReconcile { - return emptyResult, nil + // Fill the empty fields in the AutonomousDatabase resource by + // syncing up with the Autonomous Database in OCI. Only the fields + // that have nil values will be overwritten. 
+ var stateBeforeFirstSync = desiredAdb.Status.LifecycleState + if _, err = r.syncAutonomousDatabase(logger, desiredAdb, false); err != nil { + return r.manageError( + logger.WithName("syncAutonomousDatabase"), + desiredAdb, + fmt.Errorf("Failed to sync AutonomousDatabase: %w", err)) + } + + // If the lifecycle state changes from any other states to + // AVAILABLE and spec.action is an empty string, it means that + // the resource in OCI just finished the work, and the spec + // of the Autonomous Database in OCI might also change. + // This is because OCI won't update the spec until the work + // completes. In this case, we need to update the spec of + // the resource in local cluster. + if stateBeforeFirstSync != database.AutonomousDatabaseLifecycleStateAvailable && + desiredAdb.Status.LifecycleState == database.AutonomousDatabaseLifecycleStateAvailable { + if specChanged, err = r.syncAutonomousDatabase(logger, desiredAdb, true); err != nil { + return r.manageError( + logger.WithName("syncAutonomousDatabase"), + desiredAdb, + fmt.Errorf("Failed to sync AutonomousDatabase: %w", err)) + } } /****************************************************************** - * Register/unregister the finalizer + * Determine if the external resource needs to be cleaned up. + * If yes, delete the Autonomous Database in OCI and exits the + * reconcile function immediately. + * + * There is no need to check the other fields if the resource is + * under deletion. This method should be executed soon after the OCI + * database client is obtained and the local resource is synced in + * the above two steps. + * + * Kubernetes server calls the clean up function if a finalizer exitsts, + * and won't delete the object until all the finalizers are removed + * from the object metadata. 
******************************************************************/ - exit, err := r.validateFinalizer(logger, desiredADB) - if err != nil { - return r.manageError(logger.WithName("validateFinalizer"), desiredADB, err) - } + if desiredAdb.GetDeletionTimestamp().IsZero() { + // The Autonomous Database is not being deleted. Update the finalizer. + if desiredAdb.Spec.HardLink != nil && + *desiredAdb.Spec.HardLink && + !controllerutil.ContainsFinalizer(desiredAdb, ADB_FINALIZER) { + + if err := k8s.AddFinalizerAndPatch(r.KubeClient, desiredAdb, ADB_FINALIZER); err != nil { + return emptyResult, fmt.Errorf("Failed to add finalizer to Autonomous Database "+desiredAdb.Name+": %w", err) + } + } else if desiredAdb.Spec.HardLink != nil && + !*desiredAdb.Spec.HardLink && + controllerutil.ContainsFinalizer(desiredAdb, ADB_FINALIZER) { - if exit { - return emptyResult, nil + if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, desiredAdb, ADB_FINALIZER); err != nil { + return emptyResult, fmt.Errorf("Failed to remove finalizer to Autonomous Database "+desiredAdb.Name+": %w", err) + } + } + } else { + // The Autonomous Database is being deleted + if controllerutil.ContainsFinalizer(desiredAdb, ADB_FINALIZER) { + if dbv4.IsAdbIntermediateState(desiredAdb.Status.LifecycleState) { + // No-op + } else if desiredAdb.Status.LifecycleState == database.AutonomousDatabaseLifecycleStateTerminated { + // The Autonomous Database in OCI has been deleted. Remove the finalizer. + if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, desiredAdb, ADB_FINALIZER); err != nil { + return emptyResult, fmt.Errorf("Failed to remove finalizer to Autonomous Database "+desiredAdb.Name+": %w", err) + } + } else { + // Remove the Autonomous Database in OCI. 
+ // Change the action to Terminate and proceed with the rest of the reconcile logic + desiredAdb.Spec.Action = "Terminate" + } + } } - /****************************************************************** - * Validate operations - ******************************************************************/ - modifiedADB := desiredADB.DeepCopy() // the ADB which stores the changes - exitReconcile, result, err := r.validateOperation(logger, modifiedADB, ociADB) - if err != nil { - return r.manageError(logger.WithName("validateOperation"), modifiedADB, err) - } - if exitReconcile { - return result, nil - } + if !dbv4.IsAdbIntermediateState(desiredAdb.Status.LifecycleState) { + /****************************************************************** + * Perform operations + ******************************************************************/ + var specChangedAfterOperation bool + specChangedAfterOperation, err = r.performOperation(logger, desiredAdb) + if err != nil { + return r.manageError( + logger.WithName("performOperation"), + desiredAdb, + fmt.Errorf("Failed to operate database action: %w", err)) + } - /***************************************************** - * Sync AutonomousDatabase Backups from OCI - *****************************************************/ - if err := r.syncBackupResources(logger, modifiedADB); err != nil { - return r.manageError(logger.WithName("syncBackupResources"), modifiedADB, err) - } + if specChangedAfterOperation { + specChanged = true + } - /***************************************************** - * Validate Wallet - *****************************************************/ - if err := r.validateWallet(logger, modifiedADB); err != nil { - return r.manageError(logger.WithName("validateWallet"), modifiedADB, err) - } + /****************************************************************** + * Sync AutonomousDatabase Backups from OCI. + * The backups will not be synced when the lifecycle state is + * TERMINATING or TERMINATED. 
+ ******************************************************************/ + if desiredAdb.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateTerminating && + desiredAdb.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateTerminated { + if err := r.syncBackupResources(logger, desiredAdb); err != nil { + return r.manageError(logger.WithName("syncBackupResources"), desiredAdb, err) + } + } - /****************************************************************** - * Update the resource if the spec has been changed. - * This will trigger another reconcile, so returns with an empty - * result. - ******************************************************************/ - if !reflect.DeepEqual(modifiedADB.Spec, desiredADB.Spec) { - if err := r.KubeClient.Update(context.TODO(), modifiedADB); err != nil { - return r.manageError(logger.WithName("updateSpec"), modifiedADB, err) + /***************************************************** + * Validate Wallet + *****************************************************/ + if err := r.validateWallet(logger, desiredAdb); err != nil { + return r.manageError( + logger.WithName("validateWallet"), + desiredAdb, + fmt.Errorf("Failed to validate Wallet: %w", err)) } - return emptyResult, nil } /****************************************************************** - * Update the status at the end of every reconcile. + * Update the Autonomous Database at the end of every reconcile. 
******************************************************************/ - copiedADB := modifiedADB.DeepCopy() - - updateCondition(modifiedADB, nil) - if err := r.KubeClient.Status().Update(context.TODO(), modifiedADB); err != nil { - return r.manageError(logger.WithName("Status().Update"), modifiedADB, err) + if specChanged { + if err := r.KubeClient.Update(context.TODO(), desiredAdb); err != nil { + return r.manageError( + logger.WithName("updateSpec"), + desiredAdb, + fmt.Errorf("Failed to update AutonomousDatabase spec: %w", err)) + } + // Immediately exit the reconcile loop if the resource is updated, and let + // the next run continue. + return emptyResult, nil } - modifiedADB.Spec = copiedADB.Spec - if dbv1alpha1.IsADBIntermediateState(modifiedADB.Status.LifecycleState) { - logger.WithName("IsADBIntermediateState").Info("LifecycleState is " + string(modifiedADB.Status.LifecycleState) + "; reconcile queued") - return requeueResult, nil + updateCondition(desiredAdb, nil) + if err := r.KubeClient.Status().Update(context.TODO(), desiredAdb); err != nil { + return r.manageError( + logger, + desiredAdb, + fmt.Errorf("Failed to update AutonomousDatabase status: %w", err)) } /****************************************************************** - * Update the lastSucSpec, and then finish the reconcile. - * Requeue if the ADB is terminated, but the finalizer is not yet - * removed. + * Requeue the request in the following cases: + * 1. the ADB is in intermediate state + * 2. the ADB is terminated, but the finalizer is not yet removed. ******************************************************************/ - - var requeue bool = false - if modifiedADB.GetDeletionTimestamp() != nil && - controllerutil.ContainsFinalizer(modifiedADB, dbv1alpha1.ADB_FINALIZER) && - modifiedADB.Status.LifecycleState == database.AutonomousDatabaseLifecycleStateTerminated { - logger.Info("The ADB is TERMINATED. 
The CR is to be deleted but finalizer is not yet removed; reconcile queued") - requeue = true - } - - if err := r.patchLastSuccessfulSpec(modifiedADB); err != nil { - return r.manageError(logger.WithName("patchLastSuccessfulSpec"), modifiedADB, err) - } - - if requeue { - logger.Info("Reconcile queued") + if dbv4.IsAdbIntermediateState(desiredAdb.Status.LifecycleState) { + logger. + WithName("IsAdbIntermediateState"). + Info("LifecycleState is " + string(desiredAdb.Status.LifecycleState) + "; reconciliation queued") return requeueResult, nil - } else { logger.Info("AutonomousDatabase reconciles successfully") return emptyResult, nil } } -func (r *AutonomousDatabaseReconciler) setupOCIClients(logger logr.Logger, adb *dbv1alpha1.AutonomousDatabase) error { +func (r *AutonomousDatabaseReconciler) setupOCIClients(logger logr.Logger, adb *dbv4.AutonomousDatabase) error { var err error - authData := oci.APIKeyAuth{ - ConfigMapName: adb.Spec.OCIConfig.ConfigMapName, - SecretName: adb.Spec.OCIConfig.SecretName, + authData := oci.ApiKeyAuth{ + ConfigMapName: adb.Spec.OciConfig.ConfigMapName, + SecretName: adb.Spec.OciConfig.SecretName, Namespace: adb.GetNamespace(), } - provider, err := oci.GetOCIProvider(r.KubeClient, authData) + provider, err := oci.GetOciProvider(r.KubeClient, authData) if err != nil { return err } @@ -353,894 +385,282 @@ func (r *AutonomousDatabaseReconciler) setupOCIClients(logger logr.Logger, adb * return nil } -func (r *AutonomousDatabaseReconciler) manageError(logger logr.Logger, adb *dbv1alpha1.AutonomousDatabase, err error) (ctrl.Result, error) { +// Upates the status with the error and returns an empty result +func (r *AutonomousDatabaseReconciler) manageError(logger logr.Logger, adb *dbv4.AutonomousDatabase, err error) (ctrl.Result, error) { l := logger.WithName("manageError") - if adb.Status.LifecycleState == "" { - // First time entering reconcile - updateCondition(adb, err) - l.Error(err, "CreateFailed") - - return emptyResult, nil - } else { 
- // Has synced at least once - var finalError = err - - // Roll back - ociADB := adb.DeepCopy() - specChanged, err := r.getADB(l, ociADB) - if err != nil { - finalError = k8s.CombineErrors(finalError, err) - } + l.Error(err, "Error occured") - // Will exit the Reconcile anyway after the manageError is called. - if specChanged { - // Clear the lifecycleState first to avoid the webhook error when update during an intermediate state - adb.Status.LifecycleState = "" - if err := r.KubeClient.Status().Update(context.TODO(), adb); err != nil { - finalError = k8s.CombineErrors(finalError, err) - } - - adb.Spec = ociADB.Spec - - if err := r.KubeClient.Update(context.TODO(), adb); err != nil { - finalError = k8s.CombineErrors(finalError, err) - } - } - - updateCondition(adb, err) - - l.Error(finalError, "UpdateFailed") - - return emptyResult, nil + updateCondition(adb, err) + if err := r.KubeClient.Status().Update(context.TODO(), adb); err != nil { + return emptyResult, fmt.Errorf("Failed to update status: %w", err) } + return emptyResult, nil } -const CONDITION_TYPE_COMPLETE = "Complete" -const CONDITION_REASON_COMPLETE = "ReconcileComplete" +const CONDITION_TYPE_AVAILABLE = "Available" +const CONDITION_REASON_AVAILABLE = "Available" +const CONDITION_TYPE_RECONCILE_QUEUED = "ReconcileQueued" +const CONDITION_REASON_RECONCILE_QUEUED = "LastReconcileQueued" +const CONDITION_TYPE_RECONCILE_ERROR = "ReconfileError" +const CONDITION_REASON_RECONCILE_ERROR = "LastReconcileError" -func updateCondition(adb *dbv1alpha1.AutonomousDatabase, err error) { +func updateCondition(adb *dbv4.AutonomousDatabase, err error) { var condition metav1.Condition + var errMsg string - errMsg := func() string { - if err != nil { - return err.Error() + if err != nil { + errMsg = err.Error() + } + + // Clean up the Conditions array + if len(adb.Status.Conditions) > 0 { + var allConditions = []string{ + CONDITION_TYPE_AVAILABLE, + CONDITION_TYPE_RECONCILE_QUEUED, + CONDITION_TYPE_RECONCILE_ERROR} + + 
for _, conditionType := range allConditions { + meta.RemoveStatusCondition(&adb.Status.Conditions, conditionType) } - return "no reconcile errors" - }() + } - // If error occurs, ReconcileComplete will be marked as true and the error message will still be listed - // If the ADB lifecycleState is intermediate, then ReconcileComplete will be marked as false + // If error occurs, the condition status will be marked as false and the error message will still be listed + // If the ADB lifecycleState is intermediate, then condition status will be marked as true + // Otherwise, then condition status will be marked as true if no error occurs if err != nil { condition = metav1.Condition{ - Type: CONDITION_TYPE_COMPLETE, + Type: CONDITION_TYPE_RECONCILE_ERROR, LastTransitionTime: metav1.Now(), ObservedGeneration: adb.GetGeneration(), - Reason: CONDITION_REASON_COMPLETE, + Reason: CONDITION_REASON_RECONCILE_ERROR, Message: errMsg, - Status: metav1.ConditionTrue, + Status: metav1.ConditionFalse, } - } else if dbv1alpha1.IsADBIntermediateState(adb.Status.LifecycleState) { + } else if dbv4.IsAdbIntermediateState(adb.Status.LifecycleState) { condition = metav1.Condition{ - Type: CONDITION_TYPE_COMPLETE, + Type: CONDITION_TYPE_RECONCILE_QUEUED, LastTransitionTime: metav1.Now(), ObservedGeneration: adb.GetGeneration(), - Reason: CONDITION_REASON_COMPLETE, - Message: errMsg, - Status: metav1.ConditionFalse, + Reason: CONDITION_REASON_RECONCILE_QUEUED, + Message: "no reconcile errors", + Status: metav1.ConditionTrue, } } else { condition = metav1.Condition{ - Type: CONDITION_TYPE_COMPLETE, + Type: CONDITION_TYPE_AVAILABLE, LastTransitionTime: metav1.Now(), ObservedGeneration: adb.GetGeneration(), - Reason: CONDITION_REASON_COMPLETE, - Message: errMsg, + Reason: CONDITION_REASON_AVAILABLE, + Message: "no reconcile errors", Status: metav1.ConditionTrue, } } - if len(adb.Status.Conditions) > 0 { - meta.RemoveStatusCondition(&adb.Status.Conditions, condition.Type) - } 
meta.SetStatusCondition(&adb.Status.Conditions, condition) } -func (r *AutonomousDatabaseReconciler) validateOperation( +func (r *AutonomousDatabaseReconciler) performOperation( logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (exit bool, result ctrl.Result, err error) { - - lastSucSpec, err := adb.GetLastSuccessfulSpec() - if err != nil { - return false, emptyResult, err - } + adb *dbv4.AutonomousDatabase) (specChanged bool, err error) { l := logger.WithName("validateOperation") - // If lastSucSpec is nil, then it's CREATE or BIND opertaion - if lastSucSpec == nil { - if adb.Spec.Details.AutonomousDatabaseOCID == nil { - l.Info("Create operation") - err := r.createADB(logger, adb) - if err != nil { - return false, emptyResult, err - } - - // Update the ADB OCID - if err := r.updateCR(adb); err != nil { - return false, emptyResult, err - } - - l.Info("AutonomousDatabaseOCID updated; exit reconcile") - return true, emptyResult, nil - } else { - l.Info("Bind operation") - _, err := r.getADB(logger, adb) - if err != nil { - return false, emptyResult, err - } - - if err := r.updateCR(adb); err != nil { - return false, emptyResult, err - } - - l.Info("spec updated; exit reconcile") - return true, emptyResult, nil + switch adb.Spec.Action { + case "Create": + l.Info("Create operation") + err := r.createAutonomousDatabase(logger, adb) + if err != nil { + return false, err } - } - // If it's not CREATE or BIND opertaion, then it's UPDATE or SYNC operation. - // In most of the case the user changes the spec, and we update the oci ADB, but when the user updates on - // the Cloud Console, the controller cannot tell the direction and how to update the resource. - // Thus we compare the current spec with the lastSucSpec. If the details are different, it means that - // the user updates the spec (UPDATE operation), otherwise it's a SYNC operation. 
- lastDifADB := adb.DeepCopy() + adb.Spec.Action = "" + return true, nil - lastDetailsChanged, err := lastDifADB.RemoveUnchangedDetails(*lastSucSpec) - if err != nil { - return false, emptyResult, err - } + case "Sync": + l.Info("Sync operation") + _, err = r.syncAutonomousDatabase(logger, adb, true) + if err != nil { + return false, err + } - if lastDetailsChanged { - // Double check if the user input spec is actually different from the spec in OCI. If so, then update the resource. - // When the update completes and the status changes from UPDATING to AVAILABLE, the lastSucSpec is not updated yet, - // so we compare with the oci ADB again to make sure that the updates are completed. + adb.Spec.Action = "" + return true, nil + case "Update": l.Info("Update operation") - - exit, err := r.updateADB(logger, adb) + err = r.updateAutonomousDatabase(logger, adb) if err != nil { - return false, emptyResult, err + return false, err } - return exit, emptyResult, nil + adb.Spec.Action = "" + return true, nil - } else { - l.Info("No operation specified; sync the resource") + case "Stop": + l.Info("Sending StopAutonomousDatabase request to OCI") - // The user doesn't change the spec and the controller should pull the spec from the OCI. - specChanged, err := r.getADB(logger, adb) + resp, err := r.dbService.StopAutonomousDatabase(*adb.Spec.Details.Id) if err != nil { - return false, emptyResult, err + return false, err } - if specChanged { - l.Info("The local spec doesn't match the oci's spec; update the CR") - - // Erase the status.lifecycleState temporarily to avoid the webhook error. 
- tmpADB := adb.DeepCopy() - adb.Status.LifecycleState = "" - if err := r.KubeClient.Status().Update(context.TODO(), adb); err != nil { - return false, emptyResult, err - } - adb.Spec = tmpADB.Spec + adb.Spec.Action = "" + adb.Status.LifecycleState = resp.LifecycleState + return true, nil - if err := r.updateCR(adb); err != nil { - return false, emptyResult, err - } + case "Start": + l.Info("Sending StartAutonomousDatabase request to OCI") - return true, emptyResult, nil + resp, err := r.dbService.StartAutonomousDatabase(*adb.Spec.Details.Id) + if err != nil { + return false, err } - return false, emptyResult, nil - } -} -func (r *AutonomousDatabaseReconciler) validateCleanup(logger logr.Logger, adb *dbv1alpha1.AutonomousDatabase) (exitReconcile bool, err error) { - l := logger.WithName("validateCleanup") - - isADBToBeDeleted := adb.GetDeletionTimestamp() != nil - - if !isADBToBeDeleted { - return false, nil - } + adb.Spec.Action = "" + adb.Status.LifecycleState = resp.LifecycleState + return true, nil - if controllerutil.ContainsFinalizer(adb, dbv1alpha1.ADB_FINALIZER) { - if adb.Status.LifecycleState == database.AutonomousDatabaseLifecycleStateTerminating { - // Delete in progress, continue with the reconcile logic - return false, nil - } + case "Terminate": + // OCI only allows terminate operation when the ADB is in an valid state, otherwise requeue the reconcile. + if dbv4.CanBeTerminated(adb.Status.LifecycleState) { + l.Info("Sending DeleteAutonomousDatabase request to OCI") - if adb.Status.LifecycleState == database.AutonomousDatabaseLifecycleStateTerminated { - // The adb has been deleted. Remove the finalizer and exit the reconcile. - // Once all finalizers have been removed, the object will be deleted. 
- l.Info("Resource is in TERMINATED state; remove the finalizer") - if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, adb, dbv1alpha1.ADB_FINALIZER); err != nil { + _, err := r.dbService.DeleteAutonomousDatabase(*adb.Spec.Details.Id) + if err != nil { return false, err } - return true, nil - } - if adb.Spec.Details.AutonomousDatabaseOCID == nil { - l.Info("Missing AutonomousDatabaseOCID to terminate Autonomous Database; remove the finalizer anyway", "Name", adb.Name, "Namespace", adb.Namespace) - // Remove finalizer anyway. - if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, adb, dbv1alpha1.ADB_FINALIZER); err != nil { + if err := r.removeBackupResources(l, adb); err != nil { return false, err } - return true, nil - } - if adb.Spec.Details.LifecycleState != database.AutonomousDatabaseLifecycleStateTerminated { - // Run finalization logic for finalizer. If the finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - l.Info("Terminating Autonomous Database") - adb.Spec.Details.LifecycleState = database.AutonomousDatabaseLifecycleStateTerminated - if err := r.KubeClient.Update(context.TODO(), adb); err != nil { - return false, err - } - // Exit the reconcile since we have updated the spec - return true, nil + adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateTerminating + } else if dbv4.IsAdbIntermediateState(adb.Status.LifecycleState) { + l.Info("Can not terminate an ADB in an intermediate state; exit reconcile") } - // Continue with the reconcile logic - return false, nil - } - - // Exit the Reconcile since the to-be-deleted resource doesn't has a finalizer - return true, nil -} - -func (r *AutonomousDatabaseReconciler) validateFinalizer(logger logr.Logger, adb *dbv1alpha1.AutonomousDatabase) (exit bool, err error) { - l := logger.WithName("validateFinalizer") - - // Delete is not schduled. 
Update the finalizer for this CR if hardLink is present - var finalizerChanged = false - if adb.Spec.HardLink != nil { - if *adb.Spec.HardLink && !controllerutil.ContainsFinalizer(adb, dbv1alpha1.ADB_FINALIZER) { - l.Info("Finalizer added") - if err := k8s.AddFinalizerAndPatch(r.KubeClient, adb, dbv1alpha1.ADB_FINALIZER); err != nil { - return false, err - } - - finalizerChanged = true + adb.Spec.Action = "" + return true, nil - } else if !*adb.Spec.HardLink && controllerutil.ContainsFinalizer(adb, dbv1alpha1.ADB_FINALIZER) { - l.Info("Finalizer removed") + case "Clone": + resp, err := r.dbService.CreateAutonomousDatabaseClone(adb) + if err != nil { + return false, err + } + adb.Status.LifecycleState = resp.LifecycleState - if err := k8s.RemoveFinalizerAndPatch(r.KubeClient, adb, dbv1alpha1.ADB_FINALIZER); err != nil { - return false, err - } + adb.Spec.Action = "" - finalizerChanged = true + // Create cloned Autonomous Database resource + clonedAdb := &dbv4.AutonomousDatabase{ + ObjectMeta: metav1.ObjectMeta{ + Name: *adb.Spec.Clone.DisplayName, + Namespace: adb.Namespace, + }, + Spec: dbv4.AutonomousDatabaseSpec{ + OciConfig: *adb.Spec.OciConfig.DeepCopy(), + }, + } + clonedAdb.UpdateFromOciAdb(resp.AutonomousDatabase, true) + if err := r.KubeClient.Create(context.TODO(), clonedAdb); err != nil { + return false, err } - } - - // If the finalizer is changed during an intermediate state, e.g. set hardLink to true and - // delete the resource, then there must be another ongoing reconcile. In this case we should - // exit the reconcile. 
- if finalizerChanged && dbv1alpha1.IsADBIntermediateState(adb.Status.LifecycleState) { - l.Info("Finalizer changed during an intermediate state, exit the reconcile") return true, nil - } - return false, nil -} - -// updateCR updates the lastSucSpec and the CR -func (r *AutonomousDatabaseReconciler) updateCR(adb *dbv1alpha1.AutonomousDatabase) error { - // Update the lastSucSpec - // Should patch the lastSuccessfulSpec first, otherwise, the update event will be - // filtered out by predicate since the lastSuccessfulSpec is changed. - if err := r.patchLastSuccessfulSpec(adb); err != nil { - return err - } - - if err := r.KubeClient.Update(context.TODO(), adb); err != nil { - return err - } - return nil -} - -func (r *AutonomousDatabaseReconciler) patchLastSuccessfulSpec(adb *dbv1alpha1.AutonomousDatabase) error { - copyADB := adb.DeepCopy() - - specBytes, err := json.Marshal(adb.Spec) - if err != nil { - return err - } - - anns := map[string]string{ - dbv1alpha1.LastSuccessfulSpec: string(specBytes), + case "": + // No-op + return false, nil + default: + adb.Spec.Action = "" + return true, errors.New("Unknown action: " + adb.Spec.Action) } - - annotations.PatchAnnotations(r.KubeClient, adb, anns) - - adb.Spec = copyADB.Spec - adb.Status = copyADB.Status - - return nil } -func (r *AutonomousDatabaseReconciler) createADB(logger logr.Logger, adb *dbv1alpha1.AutonomousDatabase) error { +func (r *AutonomousDatabaseReconciler) createAutonomousDatabase(logger logr.Logger, adb *dbv4.AutonomousDatabase) error { logger.WithName("createADB").Info("Sending CreateAutonomousDatabase request to OCI") resp, err := r.dbService.CreateAutonomousDatabase(adb) if err != nil { return err } - // Restore the admin password after updating from OCI ADB - adminPass := adb.Spec.Details.AdminPassword - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - adb.Spec.Details.AdminPassword = adminPass + adb.UpdateFromOciAdb(resp.AutonomousDatabase, true) return nil } -// getADB gets the information from 
OCI and overwrites the spec and the status, but not update the CR in the cluster -func (r *AutonomousDatabaseReconciler) getADB(logger logr.Logger, adb *dbv1alpha1.AutonomousDatabase) (bool, error) { - if adb == nil { - return false, errors.New("AutonomousDatabase OCID is missing") +// syncAutonomousDatabase retrieve the information of AutonomousDatabase from +// OCI and "overwrite" decides whether the spec and the status of "adb" will +// be overwritten. +// It will be a no-op if "Spec.Details.AutonomousDatabaseOCID" of the provided +// AutonomousDatabase is nil. +// This method does not update the actual resource in the cluster. +// +// The returned values are: +// 1. bool: indicates whether the spec is changed after the sync +// 2. error: not nil if an error occurs during the sync +func (r *AutonomousDatabaseReconciler) syncAutonomousDatabase( + logger logr.Logger, + adb *dbv4.AutonomousDatabase, overwrite bool) (specChanged bool, err error) { + if adb.Spec.Details.Id == nil { + return false, nil } - l := logger.WithName("getADB") + l := logger.WithName("syncAutonomousDatabase") // Get the information from OCI l.Info("Sending GetAutonomousDatabase request to OCI") - resp, err := r.dbService.GetAutonomousDatabase(*adb.Spec.Details.AutonomousDatabaseOCID) + resp, err := r.dbService.GetAutonomousDatabase(*adb.Spec.Details.Id) if err != nil { return false, err } - specChanged := adb.UpdateFromOCIADB(resp.AutonomousDatabase) - + specChanged = adb.UpdateFromOciAdb(resp.AutonomousDatabase, overwrite) return specChanged, nil } -// updateADB returns true if an OCI request is sent. +// updateAutonomousDatabase returns true if an OCI request is sent. // The AutonomousDatabase is updated with the returned object from the OCI requests. 
-func (r *AutonomousDatabaseReconciler) updateADB( +func (r *AutonomousDatabaseReconciler) updateAutonomousDatabase( logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase) (exit bool, err error) { - - l := logger.WithName("updateADB") + adb *dbv4.AutonomousDatabase) (err error) { // Get OCI AutonomousDatabase and update the lifecycleState of the CR, // so that the validatexx functions know when the state changes back to AVAILABLE - ociADB := adb.DeepCopy() - _, err = r.getADB(logger, ociADB) + ociAdb := adb.DeepCopy() + _, err = r.syncAutonomousDatabase(logger, ociAdb, true) if err != nil { - return false, err + return err } - adb.Status.LifecycleState = ociADB.Status.LifecycleState - // Start update - difADB := adb.DeepCopy() + // difAdb is used to store ONLY the values of Autonomous Database that are + // difference from the one in OCI + difAdb := adb.DeepCopy() - ociDetailsChanged, err := difADB.RemoveUnchangedDetails(ociADB.Spec) + detailsAreChanged, err := difAdb.RemoveUnchangedDetails(ociAdb.Spec) if err != nil { - return false, err + return err } // Do the update request only if the current ADB is actually different from the OCI ADB - if ociDetailsChanged { - // Special case: if the oci ADB is terminating, then update the spec and exit the reconcile. - // This happens when the lifecycleState changes to TERMINATED during an intermediate state, - // whatever is in progress should be abandonded and the desired spec should the same as oci ADB. 
- if ociADB.Status.LifecycleState == database.AutonomousDatabaseLifecycleStateTerminating { - l.Info("OCI ADB is in TERMINATING state; update the spec and exit the reconcile") - - adb.Status.LifecycleState = "" - if err := r.KubeClient.Status().Update(context.TODO(), adb); err != nil { - return false, err - } - - adb.Spec = ociADB.Spec - if err := r.KubeClient.Update(context.TODO(), adb); err != nil { - return false, err - } - return true, nil - } - - // Special case: if the lifecycleState is changed, it might have to exit the reconcile in some cases. - sent, exit, err := r.validateDesiredLifecycleState(logger, adb, difADB, ociADB) - if err != nil { - return false, err - } - if sent { - return exit, nil - } - - validations := []func(logr.Logger, *dbv1alpha1.AutonomousDatabase, *dbv1alpha1.AutonomousDatabase, *dbv1alpha1.AutonomousDatabase) (bool, error){ - r.validateGeneralFields, - r.validateAdminPassword, - r.validateDbWorkload, - r.validateLicenseModel, - r.validateScalingFields, - r.validateGeneralNetworkAccess, - } - - for _, op := range validations { - sent, err := op(logger, adb, difADB, ociADB) - if err != nil { - return false, err - } - - if sent { - return false, nil - } - } - } - - return false, nil -} - -func (r *AutonomousDatabaseReconciler) validateGeneralFields( - logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - difADB *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (sent bool, err error) { - - if difADB.Spec.Details.DisplayName == nil && - difADB.Spec.Details.DbName == nil && - difADB.Spec.Details.DbVersion == nil && - difADB.Spec.Details.FreeformTags == nil { - return false, nil - } + if detailsAreChanged { + logger.Info("Sending UpdateAutonomousDatabase request to OCI") - if ociADB.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable { - return false, nil - } - - l := logger.WithName("validateGeneralFields") - - l.Info("Sending UpdateAutonomousDatabase request to OCI") - resp, err := 
r.dbService.UpdateAutonomousDatabaseGeneralFields(*adb.Spec.Details.AutonomousDatabaseOCID, difADB) - if err != nil { - return false, err - } - - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - - return true, nil -} - -// Special case: compare with lastSpec but not ociSpec -func (r *AutonomousDatabaseReconciler) validateAdminPassword( - logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - difADB *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (sent bool, err error) { - - if difADB.Spec.Details.AdminPassword.K8sSecret.Name == nil && - difADB.Spec.Details.AdminPassword.OCISecret.OCID == nil { - return false, nil - } - - if ociADB.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable { - return false, nil - } - - l := logger.WithName("validateAdminPassword") - - l.Info("Sending UpdateAutonomousDatabase request to OCI") - resp, err := r.dbService.UpdateAutonomousDatabaseAdminPassword(*adb.Spec.Details.AutonomousDatabaseOCID, difADB) - if err != nil { - return false, err - } - - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - // Update the admin password fields because they are missing in the ociADB - adb.Spec.Details.AdminPassword = difADB.Spec.Details.AdminPassword - - return true, nil -} - -func (r *AutonomousDatabaseReconciler) validateDbWorkload( - logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - difADB *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (sent bool, err error) { - - if difADB.Spec.Details.DbWorkload == "" { - return false, nil - } - - if ociADB.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable { - return false, nil - } - - l := logger.WithName("validateDbWorkload") - - l.Info("Sending UpdateAutonomousDatabase request to OCI") - resp, err := r.dbService.UpdateAutonomousDatabaseDBWorkload(*adb.Spec.Details.AutonomousDatabaseOCID, difADB) - if err != nil { - return false, err - } - - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - - return 
true, nil -} - -func (r *AutonomousDatabaseReconciler) validateLicenseModel( - logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - difADB *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (sent bool, err error) { - - if difADB.Spec.Details.LicenseModel == "" { - return false, nil - } - - if ociADB.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable { - return false, nil - } - - l := logger.WithName("validateLicenseModel") - - l.Info("Sending UpdateAutonomousDatabase request to OCI") - resp, err := r.dbService.UpdateAutonomousDatabaseLicenseModel(*adb.Spec.Details.AutonomousDatabaseOCID, difADB) - if err != nil { - return false, err - } - - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - - return true, nil -} - -func (r *AutonomousDatabaseReconciler) validateScalingFields( - logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - difADB *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (sent bool, err error) { - - if difADB.Spec.Details.DataStorageSizeInTBs == nil && - difADB.Spec.Details.CPUCoreCount == nil && - difADB.Spec.Details.IsAutoScalingEnabled == nil { - return false, nil - } - - if ociADB.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable { - return false, nil - } - - l := logger.WithName("validateScalingFields") - - l.Info("Sending UpdateAutonomousDatabase request to OCI") - resp, err := r.dbService.UpdateAutonomousDatabaseScalingFields(*adb.Spec.Details.AutonomousDatabaseOCID, difADB) - if err != nil { - return false, err - } - - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - - return true, nil -} - -func (r *AutonomousDatabaseReconciler) validateDesiredLifecycleState( - logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - difADB *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (sent bool, exit bool, err error) { - - if difADB.Spec.Details.LifecycleState == "" { - return false, false, nil - } - - if 
difADB.Spec.Details.LifecycleState == database.AutonomousDatabaseLifecycleStateTerminated { - // OCI only allows terminate operation when the ADB is in an valid state, otherwise requeue the reconcile. - if !dbv1alpha1.ValidADBTerminateState(adb.Status.LifecycleState) { - return false, false, nil - } - } else if dbv1alpha1.IsADBIntermediateState(ociADB.Status.LifecycleState) { - // Other lifecycle management operation; requeue the reconcile if it's in an intermediate state - return false, false, nil - } - - l := logger.WithName("validateDesiredLifecycleState") - - switch difADB.Spec.Details.LifecycleState { - case database.AutonomousDatabaseLifecycleStateAvailable: - l.Info("Sending StartAutonomousDatabase request to OCI") - - resp, err := r.dbService.StartAutonomousDatabase(*adb.Spec.Details.AutonomousDatabaseOCID) - if err != nil { - return false, false, err - } - - adb.Status.LifecycleState = resp.LifecycleState - case database.AutonomousDatabaseLifecycleStateStopped: - l.Info("Sending StopAutonomousDatabase request to OCI") - - resp, err := r.dbService.StopAutonomousDatabase(*adb.Spec.Details.AutonomousDatabaseOCID) - if err != nil { - return false, false, err - } - - adb.Status.LifecycleState = resp.LifecycleState - case database.AutonomousDatabaseLifecycleStateTerminated: - l.Info("Sending DeleteAutonomousDatabase request to OCI") - - _, err := r.dbService.DeleteAutonomousDatabase(*adb.Spec.Details.AutonomousDatabaseOCID) - if err != nil { - return false, false, err - } - - adb.Status.LifecycleState = database.AutonomousDatabaseLifecycleStateTerminating - - // The controller allows terminate during some intermediate states. - // Exit the reconcile because there is already another ongoing reconcile. 
- if dbv1alpha1.IsADBIntermediateState(ociADB.Status.LifecycleState) { - l.Info("Terminating an ADB which is in an intermediate state; exit reconcile") - return true, true, nil - } - default: - return false, false, errors.New("unknown lifecycleState") - } - - return true, false, nil -} - -// The logic of updating the network access configurations is as follows: -// -// 1. Shared databases: -// If the network access type changes -// a. to PUBLIC: -// was RESTRICTED: re-enable IsMTLSConnectionRequired if its not. Then set WhitelistedIps to an array with a single empty string entry. -// was PRIVATE: re-enable IsMTLSConnectionRequired if its not. Then set PrivateEndpointLabel to an emtpy string. -// b. to RESTRICTED: -// was PUBLIC: set WhitelistedIps to desired IPs/CIDR blocks/VCN OCID. Configure the IsMTLSConnectionRequired settings if it is set to disabled. -// was PRIVATE: re-enable IsMTLSConnectionRequired if its not. Set the type to PUBLIC first, and then configure the WhitelistedIps. Finally resume the IsMTLSConnectionRequired settings if it was, or is configured as disabled. -// c. to PRIVATE: -// was PUBLIC: set subnetOCID and nsgOCIDs. Configure the IsMTLSConnectionRequired settings if it is set. -// was RESTRICTED: set subnetOCID and nsgOCIDs. Configure the IsMTLSConnectionRequired settings if it is set. -// *Note: OCI requires nsgOCIDs to be an empty string rather than nil when we don't want the adb to be included in any network security group. -// -// Otherwise, if the network access type remains the same, apply the network configuration, and then set the IsMTLSConnectionRequired. -// -// 2. 
Dedicated databases: -// Apply the configs directly -func (r *AutonomousDatabaseReconciler) validateGeneralNetworkAccess( - logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - difADB *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (sent bool, err error) { - - if difADB.Spec.Details.NetworkAccess.AccessType == "" && - difADB.Spec.Details.NetworkAccess.IsAccessControlEnabled == nil && - difADB.Spec.Details.NetworkAccess.AccessControlList == nil && - difADB.Spec.Details.NetworkAccess.IsMTLSConnectionRequired == nil && - difADB.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID == nil && - difADB.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs == nil && - difADB.Spec.Details.NetworkAccess.PrivateEndpoint.HostnamePrefix == nil { - return false, nil - } - - if ociADB.Status.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable { - return false, nil - } - - l := logger.WithName("validateGeneralNetworkAccess") - - if !*adb.Spec.Details.IsDedicated { - var lastAccessType = ociADB.Spec.Details.NetworkAccess.AccessType - var difAccessType = difADB.Spec.Details.NetworkAccess.AccessType - - if difAccessType != "" { - switch difAccessType { - case dbv1alpha1.NetworkAccessTypePublic: - l.Info("Configuring network access type to PUBLIC") - // OCI validation requires IsMTLSConnectionRequired to be enabled before changing the network access type to PUBLIC - if !*ociADB.Spec.Details.NetworkAccess.IsMTLSConnectionRequired { - if err := r.setMTLSRequired(logger, adb); err != nil { - return false, err - } - return true, nil - } - - if err := r.setNetworkAccessPublic(logger, ociADB.Spec.Details.NetworkAccess.AccessType, adb); err != nil { - return false, err - } - return true, nil - case dbv1alpha1.NetworkAccessTypeRestricted: - l.Info("Configuring network access type to RESTRICTED") - // If the access type was PRIVATE, then OCI validation requires IsMTLSConnectionRequired - // to be enabled before setting ACL. 
Also, we can only change the network access type from - // PRIVATE to PUBLIC, so the steps are PRIVATE->(requeue)->PUBLIC->(requeue)->RESTRICTED. - if lastAccessType == dbv1alpha1.NetworkAccessTypePrivate { - if !*ociADB.Spec.Details.NetworkAccess.IsMTLSConnectionRequired { - if err := r.setMTLSRequired(logger, adb); err != nil { - return false, err - } - return true, nil - } - - if err := r.setNetworkAccessPublic(logger, ociADB.Spec.Details.NetworkAccess.AccessType, adb); err != nil { - return false, err - } - return true, nil - } - - sent, err := r.validateNetworkAccess(logger, adb, difADB, ociADB) - if err != nil { - return false, err - } - if sent { - return true, nil - } - - sent, err = r.validateMTLS(logger, adb, difADB, ociADB) - if err != nil { - return false, err - } - if sent { - return true, nil - } - case dbv1alpha1.NetworkAccessTypePrivate: - l.Info("Configuring network access type to PRIVATE") - - sent, err := r.validateNetworkAccess(logger, adb, difADB, ociADB) - if err != nil { - return false, err - } - if sent { - return true, nil - } - - sent, err = r.validateMTLS(logger, adb, difADB, ociADB) - if err != nil { - return false, err - } - if sent { - return true, nil - } - } - } else { - // Access type doesn't change - sent, err := r.validateNetworkAccess(logger, adb, difADB, ociADB) - if err != nil { - return false, err - } - if sent { - return true, nil - } - - sent, err = r.validateMTLS(logger, adb, difADB, ociADB) - if err != nil { - return false, err - } - if sent { - return true, nil - } - } - } else { - // Dedicated database - sent, err := r.validateNetworkAccess(logger, adb, difADB, ociADB) + resp, err := r.dbService.UpdateAutonomousDatabase(*adb.Spec.Details.Id, difAdb) if err != nil { - return false, err - } - if sent { - return true, nil + return err } + _ = adb.UpdateFromOciAdb(resp.AutonomousDatabase, true) } - return false, nil -} - -// Set the mTLS to true but not changing the spec -func (r *AutonomousDatabaseReconciler) 
setMTLSRequired(logger logr.Logger, adb *dbv1alpha1.AutonomousDatabase) error { - l := logger.WithName("setMTLSRequired") - - l.Info("Sending request to OCI to set IsMtlsConnectionRequired to true") - - adb.Spec.Details.NetworkAccess.IsMTLSConnectionRequired = common.Bool(true) - - resp, err := r.dbService.UpdateNetworkAccessMTLSRequired(*adb.Spec.Details.AutonomousDatabaseOCID) - if err != nil { - return err - } - - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - return nil } -func (r *AutonomousDatabaseReconciler) validateMTLS( - logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - difADB *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (sent bool, err error) { - - if difADB.Spec.Details.NetworkAccess.IsMTLSConnectionRequired == nil { - return false, nil - } - - l := logger.WithName("validateMTLS") - - l.Info("Sending request to OCI to configure IsMtlsConnectionRequired") - - resp, err := r.dbService.UpdateNetworkAccessMTLS(*adb.Spec.Details.AutonomousDatabaseOCID, difADB) - if err != nil { - return false, err - } - - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - - return true, nil -} - -func (r *AutonomousDatabaseReconciler) setNetworkAccessPublic(logger logr.Logger, lastAcessType dbv1alpha1.NetworkAccessTypeEnum, adb *dbv1alpha1.AutonomousDatabase) error { - adb.Spec.Details.NetworkAccess.AccessType = dbv1alpha1.NetworkAccessTypePublic - adb.Spec.Details.NetworkAccess.AccessControlList = nil - adb.Spec.Details.NetworkAccess.PrivateEndpoint.HostnamePrefix = common.String("") - adb.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs = nil - adb.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID = nil - - l := logger.WithName("setNetworkAccessPublic") - - l.Info("Sending request to OCI to configure network access options to PUBLIC") - - resp, err := r.dbService.UpdateNetworkAccessPublic(lastAcessType, *adb.Spec.Details.AutonomousDatabaseOCID) - if err != nil { - return err - } - - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - 
- return nil -} - -func (r *AutonomousDatabaseReconciler) validateNetworkAccess( - logger logr.Logger, - adb *dbv1alpha1.AutonomousDatabase, - difADB *dbv1alpha1.AutonomousDatabase, - ociADB *dbv1alpha1.AutonomousDatabase) (sent bool, err error) { - - if difADB.Spec.Details.NetworkAccess.AccessType == "" && - difADB.Spec.Details.NetworkAccess.IsAccessControlEnabled == nil && - difADB.Spec.Details.NetworkAccess.AccessControlList == nil && - difADB.Spec.Details.NetworkAccess.PrivateEndpoint.SubnetOCID == nil && - difADB.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs == nil && - difADB.Spec.Details.NetworkAccess.PrivateEndpoint.HostnamePrefix == nil { - return false, nil - } - - l := logger.WithName("validateNetworkAccess") - - l.Info("Sending request to OCI to configure network access options") - - // When the network access type is set to PRIVATE, any nil type of nsgOCIDs needs to be set to an empty string, otherwise, OCI SDK returns a 400 error - if difADB.Spec.Details.NetworkAccess.AccessType == dbv1alpha1.NetworkAccessTypePrivate && - difADB.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs == nil { - difADB.Spec.Details.NetworkAccess.PrivateEndpoint.NsgOCIDs = []string{} - } - - resp, err := r.dbService.UpdateNetworkAccess(*adb.Spec.Details.AutonomousDatabaseOCID, difADB) - if err != nil { - return false, err - } - - adb.UpdateFromOCIADB(resp.AutonomousDatabase) - - return true, nil -} - -func (r *AutonomousDatabaseReconciler) validateWallet(logger logr.Logger, adb *dbv1alpha1.AutonomousDatabase) error { - if adb.Spec.Details.Wallet.Name == nil && - adb.Spec.Details.Wallet.Password.K8sSecret.Name == nil && - adb.Spec.Details.Wallet.Password.OCISecret.OCID == nil { +func (r *AutonomousDatabaseReconciler) validateWallet(logger logr.Logger, adb *dbv4.AutonomousDatabase) error { + if adb.Spec.Wallet.Name == nil && + adb.Spec.Wallet.Password.K8sSecret.Name == nil && + adb.Spec.Wallet.Password.OciSecret.Id == nil { return nil } @@ -1253,10 +673,10 @@ func (r 
*AutonomousDatabaseReconciler) validateWallet(logger logr.Logger, adb *d // lastSucSpec may be nil if this is the first time entering the reconciliation loop var walletName string - if adb.Spec.Details.Wallet.Name == nil { + if adb.Spec.Wallet.Name == nil { walletName = adb.GetName() + "-instance-wallet" } else { - walletName = *adb.Spec.Details.Wallet.Name + walletName = *adb.Spec.Wallet.Name } secret, err := k8s.FetchSecret(r.KubeClient, adb.GetNamespace(), walletName) @@ -1297,11 +717,11 @@ func (r *AutonomousDatabaseReconciler) validateWallet(logger logr.Logger, adb *d // updateBackupResources get the list of AutonomousDatabasBackups and // create a backup object if it's not found in the same namespace -func (r *AutonomousDatabaseReconciler) syncBackupResources(logger logr.Logger, adb *dbv1alpha1.AutonomousDatabase) error { +func (r *AutonomousDatabaseReconciler) syncBackupResources(logger logr.Logger, adb *dbv4.AutonomousDatabase) error { l := logger.WithName("syncBackupResources") // Get the list of AutonomousDatabaseBackupOCID in the same namespace - backupList, err := k8s.FetchAutonomousDatabaseBackups(r.KubeClient, adb.Namespace) + backupList, err := k8s.FetchAutonomousDatabaseBackups(r.KubeClient, adb.Namespace, adb.Name) if err != nil { return err } @@ -1319,7 +739,7 @@ func (r *AutonomousDatabaseReconciler) syncBackupResources(logger logr.Logger, a } } - resp, err := r.dbService.ListAutonomousDatabaseBackups(*adb.Spec.Details.AutonomousDatabaseOCID) + resp, err := r.dbService.ListAutonomousDatabaseBackups(*adb.Spec.Details.Id) if err != nil { return err } @@ -1370,7 +790,7 @@ func (r *AutonomousDatabaseReconciler) getValidBackupName(displayName string, us return finalName, nil } -func (r *AutonomousDatabaseReconciler) ifBackupExists(backupSummary database.AutonomousDatabaseBackupSummary, curBackupOCIDs map[string]bool, backupList *dbv1alpha1.AutonomousDatabaseBackupList) bool { +func (r *AutonomousDatabaseReconciler) ifBackupExists(backupSummary 
database.AutonomousDatabaseBackupSummary, curBackupOCIDs map[string]bool, backupList *dbv4.AutonomousDatabaseBackupList) bool { _, ok := curBackupOCIDs[*backupSummary.Id] if ok { return true @@ -1391,3 +811,24 @@ func (r *AutonomousDatabaseReconciler) ifBackupExists(backupSummary database.Aut return false } + +// removeBackupResources remove all the AutonomousDatabasBackups that +// are associated with the adb +func (r *AutonomousDatabaseReconciler) removeBackupResources(logger logr.Logger, adb *dbv4.AutonomousDatabase) error { + l := logger.WithName("removeBackupResources") + + // Get the list of AutonomousDatabaseBackupOCID in the same namespace + backupList, err := k8s.FetchAutonomousDatabaseBackups(r.KubeClient, adb.Namespace, adb.Name) + if err != nil { + return err + } + + for _, backup := range backupList.Items { + if err := r.KubeClient.Delete(context.TODO(), &backup); err != nil { + return err + } + l.Info("Delete AutonomousDatabaseBackup " + backup.Name) + } + + return nil +} diff --git a/controllers/database/autonomousdatabasebackup_controller.go b/controllers/database/autonomousdatabasebackup_controller.go index 7fac9e04..9744f3fb 100644 --- a/controllers/database/autonomousdatabasebackup_controller.go +++ b/controllers/database/autonomousdatabasebackup_controller.go @@ -54,8 +54,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/commons/adb_family" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + adbfamily "github.com/oracle/oracle-database-operator/commons/adb_family" "github.com/oracle/oracle-database-operator/commons/oci" ) @@ -72,7 +72,7 @@ type AutonomousDatabaseBackupReconciler struct { // SetupWithManager sets up the controller with the Manager. 
func (r *AutonomousDatabaseBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&dbv1alpha1.AutonomousDatabaseBackup{}). + For(&dbv4.AutonomousDatabaseBackup{}). WithEventFilter(predicate.GenerationChangedPredicate{}). WithOptions(controller.Options{MaxConcurrentReconciles: 100}). // ReconcileHandler is never invoked concurrently with the same object. Complete(r) @@ -85,14 +85,14 @@ func (r *AutonomousDatabaseBackupReconciler) SetupWithManager(mgr ctrl.Manager) func (r *AutonomousDatabaseBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { logger := r.Log.WithValues("Namespace/Name", req.NamespacedName) - backup := &dbv1alpha1.AutonomousDatabaseBackup{} + backup := &dbv4.AutonomousDatabaseBackup{} if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, backup); err != nil { // Ignore not-found errors, since they can't be fixed by an immediate requeue. // No need to change the since we don't know if we obtain the object. if apiErrors.IsNotFound(err) { return emptyResult, nil } - // Failed to get ADBBackup, so we don't need to update the status + // Failed to get AutonomousDatabaseBackup, so we don't need to update the status return emptyResult, err } @@ -100,7 +100,7 @@ func (r *AutonomousDatabaseBackupReconciler) Reconcile(ctx context.Context, req * Look up the owner AutonomousDatabase and set the ownerReference * if the owner hasn't been set yet. 
******************************************************************/ - adbOCID, err := r.verifyTargetADB(backup) + adbOCID, err := r.verifyTargetAdb(backup) if err != nil { return r.manageError(backup, err) } @@ -133,10 +133,10 @@ func (r *AutonomousDatabaseBackupReconciler) Reconcile(ctx context.Context, req /****************************************************************** * Requeue if the Backup is in an intermediate state - * No-op if the ADB OCID is nil + * No-op if the Autonomous Database OCID is nil * To get the latest status, execute before all the reconcile logic ******************************************************************/ - if dbv1alpha1.IsBackupIntermediateState(backup.Status.LifecycleState) { + if dbv4.IsBackupIntermediateState(backup.Status.LifecycleState) { logger.WithName("IsIntermediateState").Info("Current lifecycleState is " + string(backup.Status.LifecycleState) + "; reconcile queued") return requeueResult, nil } @@ -187,7 +187,7 @@ func (r *AutonomousDatabaseBackupReconciler) Reconcile(ctx context.Context, req return r.manageError(backup, err) } - if dbv1alpha1.IsBackupIntermediateState(backup.Status.LifecycleState) { + if dbv4.IsBackupIntermediateState(backup.Status.LifecycleState) { logger.WithName("IsIntermediateState").Info("Reconcile queued") return requeueResult, nil } @@ -198,7 +198,7 @@ func (r *AutonomousDatabaseBackupReconciler) Reconcile(ctx context.Context, req } // setOwnerAutonomousDatabase sets the owner of the AutonomousDatabaseBackup if the AutonomousDatabase resource with the same database OCID is found -func (r *AutonomousDatabaseBackupReconciler) setOwnerAutonomousDatabase(backup *dbv1alpha1.AutonomousDatabaseBackup, adb *dbv1alpha1.AutonomousDatabase) error { +func (r *AutonomousDatabaseBackupReconciler) setOwnerAutonomousDatabase(backup *dbv4.AutonomousDatabaseBackup, adb *dbv4.AutonomousDatabase) error { logger := r.Log.WithName("set-owner-reference") controllerutil.SetOwnerReference(adb, backup, r.Scheme) @@ 
-210,43 +210,43 @@ func (r *AutonomousDatabaseBackupReconciler) setOwnerAutonomousDatabase(backup * return nil } -// verifyTargetADB searches if the target ADB is in the cluster, and set the owner reference to the ADB if it exists. -// The function returns the OCID of the target ADB. -func (r *AutonomousDatabaseBackupReconciler) verifyTargetADB(backup *dbv1alpha1.AutonomousDatabaseBackup) (string, error) { +// verifyTargetAdb searches if the target AutonomousDatabase is in the cluster, and set the owner reference to that AutonomousDatabase if it exists. +// The function returns the OCID of the target AutonomousDatabase. +func (r *AutonomousDatabaseBackupReconciler) verifyTargetAdb(backup *dbv4.AutonomousDatabaseBackup) (string, error) { // Get the target ADB OCID and the ADB resource - ownerADB, err := adbfamily.VerifyTargetADB(r.KubeClient, backup.Spec.Target, backup.Namespace) + ownerAdb, err := adbfamily.VerifyTargetAdb(r.KubeClient, backup.Spec.Target, backup.Namespace) if err != nil { return "", err } // Set the owner reference if needed - if len(backup.GetOwnerReferences()) == 0 && ownerADB != nil { - if err := r.setOwnerAutonomousDatabase(backup, ownerADB); err != nil { + if len(backup.GetOwnerReferences()) == 0 && ownerAdb != nil { + if err := r.setOwnerAutonomousDatabase(backup, ownerAdb); err != nil { return "", err } } - if backup.Spec.Target.OCIADB.OCID != nil { - return *backup.Spec.Target.OCIADB.OCID, nil + if backup.Spec.Target.OciAdb.OCID != nil { + return *backup.Spec.Target.OciAdb.OCID, nil } - if ownerADB != nil && ownerADB.Spec.Details.AutonomousDatabaseOCID != nil { - return *ownerADB.Spec.Details.AutonomousDatabaseOCID, nil + if ownerAdb != nil && ownerAdb.Spec.Details.Id != nil { + return *ownerAdb.Spec.Details.Id, nil } - return "", errors.New("cannot get the OCID of the targetADB") + return "", errors.New("cannot get the OCID of the target AutonomousDatabase") } -func (r *AutonomousDatabaseBackupReconciler) setupOCIClients(backup 
*dbv1alpha1.AutonomousDatabaseBackup) error { +func (r *AutonomousDatabaseBackupReconciler) setupOCIClients(backup *dbv4.AutonomousDatabaseBackup) error { var err error - authData := oci.APIKeyAuth{ + authData := oci.ApiKeyAuth{ ConfigMapName: backup.Spec.OCIConfig.ConfigMapName, SecretName: backup.Spec.OCIConfig.SecretName, Namespace: backup.GetNamespace(), } - provider, err := oci.GetOCIProvider(r.KubeClient, authData) + provider, err := oci.GetOciProvider(r.KubeClient, authData) if err != nil { return err } @@ -259,7 +259,7 @@ func (r *AutonomousDatabaseBackupReconciler) setupOCIClients(backup *dbv1alpha1. return nil } -func (r *AutonomousDatabaseBackupReconciler) manageError(backup *dbv1alpha1.AutonomousDatabaseBackup, issue error) (ctrl.Result, error) { +func (r *AutonomousDatabaseBackupReconciler) manageError(backup *dbv4.AutonomousDatabaseBackup, issue error) (ctrl.Result, error) { // Send event r.Recorder.Event(backup, corev1.EventTypeWarning, "ReconcileFailed", issue.Error()) diff --git a/controllers/database/autonomousdatabaserestore_controller.go b/controllers/database/autonomousdatabaserestore_controller.go index 254731bb..61b84c5d 100644 --- a/controllers/database/autonomousdatabaserestore_controller.go +++ b/controllers/database/autonomousdatabaserestore_controller.go @@ -54,8 +54,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/oracle/oci-go-sdk/v65/common" - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/commons/adb_family" + dbv4 "github.com/oracle/oracle-database-operator/apis/database/v4" + adbfamily "github.com/oracle/oracle-database-operator/commons/adb_family" "github.com/oracle/oracle-database-operator/commons/k8s" "github.com/oracle/oracle-database-operator/commons/oci" ) @@ -74,7 +74,7 @@ type AutonomousDatabaseRestoreReconciler struct { // SetupWithManager sets up the controller with the Manager. 
func (r *AutonomousDatabaseRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&dbv1alpha1.AutonomousDatabaseRestore{}). + For(&dbv4.AutonomousDatabaseRestore{}). WithEventFilter(predicate.GenerationChangedPredicate{}). Complete(r) } @@ -95,14 +95,14 @@ func (r *AutonomousDatabaseRestoreReconciler) SetupWithManager(mgr ctrl.Manager) func (r *AutonomousDatabaseRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { logger := r.Log.WithValues("Namespace/Name", req.NamespacedName) - restore := &dbv1alpha1.AutonomousDatabaseRestore{} + restore := &dbv4.AutonomousDatabaseRestore{} if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, restore); err != nil { // Ignore not-found errors, since they can't be fixed by an immediate requeue. // No need to change since we don't know if we obtain the object. if apiErrors.IsNotFound(err) { return emptyResult, nil } - // Failed to get ADBRestore, so we don't need to update the status + // Failed to get the resource return emptyResult, err } @@ -110,7 +110,7 @@ func (r *AutonomousDatabaseRestoreReconciler) Reconcile(ctx context.Context, req * Look up the owner AutonomousDatabase and set the ownerReference * if the owner hasn't been set yet. 
******************************************************************/ - adbOCID, err := r.verifyTargetADB(restore) + adbOCID, err := r.verifyTargetAdb(restore) if err != nil { return r.manageError(restore, err) } @@ -172,7 +172,7 @@ func (r *AutonomousDatabaseRestoreReconciler) Reconcile(ctx context.Context, req } // Requeue if it's in intermediate state - if dbv1alpha1.IsRestoreIntermediateState(restore.Status.Status) { + if dbv4.IsRestoreIntermediateState(restore.Status.Status) { logger.WithName("IsIntermediateState").Info("Current status is " + string(restore.Status.Status) + "; reconcile queued") return requeueResult, nil } @@ -182,10 +182,10 @@ func (r *AutonomousDatabaseRestoreReconciler) Reconcile(ctx context.Context, req return emptyResult, nil } -func (r *AutonomousDatabaseRestoreReconciler) getRestoreSDKTime(restore *dbv1alpha1.AutonomousDatabaseRestore) (*common.SDKTime, error) { - if restore.Spec.Source.K8sADBBackup.Name != nil { // restore using backupName - backup := &dbv1alpha1.AutonomousDatabaseBackup{} - if err := k8s.FetchResource(r.KubeClient, restore.Namespace, *restore.Spec.Source.K8sADBBackup.Name, backup); err != nil { +func (r *AutonomousDatabaseRestoreReconciler) getRestoreSDKTime(restore *dbv4.AutonomousDatabaseRestore) (*common.SDKTime, error) { + if restore.Spec.Source.K8sAdbBackup.Name != nil { // restore using backupName + backup := &dbv4.AutonomousDatabaseBackup{} + if err := k8s.FetchResource(r.KubeClient, restore.Namespace, *restore.Spec.Source.K8sAdbBackup.Name, backup); err != nil { return nil, err } @@ -207,7 +207,7 @@ func (r *AutonomousDatabaseRestoreReconciler) getRestoreSDKTime(restore *dbv1alp } // setOwnerAutonomousDatabase sets the owner of the AutonomousDatabaseBackup if the AutonomousDatabase resource with the same database OCID is found -func (r *AutonomousDatabaseRestoreReconciler) setOwnerAutonomousDatabase(restore *dbv1alpha1.AutonomousDatabaseRestore, adb *dbv1alpha1.AutonomousDatabase) error { +func (r 
*AutonomousDatabaseRestoreReconciler) setOwnerAutonomousDatabase(restore *dbv4.AutonomousDatabaseRestore, adb *dbv4.AutonomousDatabase) error { logger := r.Log.WithName("set-owner-reference") controllerutil.SetOwnerReference(adb, restore, r.Scheme) @@ -219,43 +219,43 @@ func (r *AutonomousDatabaseRestoreReconciler) setOwnerAutonomousDatabase(restore return nil } -// verifyTargetADB searches if the target ADB is in the cluster, and set the owner reference to the ADB if it exists. +// verifyTargetAdb searches if the target ADB is in the cluster, and set the owner reference to the ADB if it exists. // The function returns the OCID of the target ADB. -func (r *AutonomousDatabaseRestoreReconciler) verifyTargetADB(restore *dbv1alpha1.AutonomousDatabaseRestore) (string, error) { +func (r *AutonomousDatabaseRestoreReconciler) verifyTargetAdb(restore *dbv4.AutonomousDatabaseRestore) (string, error) { // Get the target ADB OCID and the ADB resource - ownerADB, err := adbfamily.VerifyTargetADB(r.KubeClient, restore.Spec.Target, restore.Namespace) + ownerAdb, err := adbfamily.VerifyTargetAdb(r.KubeClient, restore.Spec.Target, restore.Namespace) if err != nil { return "", err } // Set the owner reference if needed - if len(restore.GetOwnerReferences()) == 0 && ownerADB != nil { - if err := r.setOwnerAutonomousDatabase(restore, ownerADB); err != nil { + if len(restore.GetOwnerReferences()) == 0 && ownerAdb != nil { + if err := r.setOwnerAutonomousDatabase(restore, ownerAdb); err != nil { return "", err } } - if restore.Spec.Target.OCIADB.OCID != nil { - return *restore.Spec.Target.OCIADB.OCID, nil + if restore.Spec.Target.OciAdb.OCID != nil { + return *restore.Spec.Target.OciAdb.OCID, nil } - if ownerADB != nil && ownerADB.Spec.Details.AutonomousDatabaseOCID != nil { - return *ownerADB.Spec.Details.AutonomousDatabaseOCID, nil + if ownerAdb != nil && ownerAdb.Spec.Details.Id != nil { + return *ownerAdb.Spec.Details.Id, nil } - return "", errors.New("cannot get the OCID of the 
targetADB") + return "", errors.New("cannot get the OCID of the target Autonomous Database") } -func (r *AutonomousDatabaseRestoreReconciler) setupOCIClients(restore *dbv1alpha1.AutonomousDatabaseRestore) error { +func (r *AutonomousDatabaseRestoreReconciler) setupOCIClients(restore *dbv4.AutonomousDatabaseRestore) error { var err error - authData := oci.APIKeyAuth{ + authData := oci.ApiKeyAuth{ ConfigMapName: restore.Spec.OCIConfig.ConfigMapName, SecretName: restore.Spec.OCIConfig.SecretName, Namespace: restore.GetNamespace(), } - provider, err := oci.GetOCIProvider(r.KubeClient, authData) + provider, err := oci.GetOciProvider(r.KubeClient, authData) if err != nil { return err } @@ -274,7 +274,7 @@ func (r *AutonomousDatabaseRestoreReconciler) setupOCIClients(restore *dbv1alpha } // manageError doesn't return the error so that the request won't be requeued -func (r *AutonomousDatabaseRestoreReconciler) manageError(restore *dbv1alpha1.AutonomousDatabaseRestore, issue error) (ctrl.Result, error) { +func (r *AutonomousDatabaseRestoreReconciler) manageError(restore *dbv4.AutonomousDatabaseRestore, issue error) (ctrl.Result, error) { // Send event r.Recorder.Event(restore, corev1.EventTypeWarning, "ReconcileFailed", issue.Error()) diff --git a/controllers/database/cdb_controller.go b/controllers/database/cdb_controller.go index 5e6c0aca..6c5fc747 100644 --- a/controllers/database/cdb_controller.go +++ b/controllers/database/cdb_controller.go @@ -40,7 +40,9 @@ package controllers import ( "context" + "encoding/json" "errors" + "fmt" //"fmt" "strconv" @@ -64,7 +66,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - dbapi "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" dbcommons "github.com/oracle/oracle-database-operator/commons/database" ) @@ -506,6 +508,17 @@ func (r *CDBReconciler) createPodSpec(cdb *dbapi.CDB) corev1.PodSpec 
{ Name: "WEBSERVER_PASSWORD_KEY", Value: cdb.Spec.WebServerPwd.Secret.Key, }, + { + Name: "R1", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: cdb.Spec.CDBPriKey.Secret.SecretName, + }, + Key: cdb.Spec.CDBPriKey.Secret.Key, + }, + }, + }, } }(), }}, @@ -749,56 +762,46 @@ func (r *CDBReconciler) createSvcSpec(cdb *dbapi.CDB) *corev1.Service { /* ************************************************ - Check CDB deletion - /*********************************************** + +/*********************************************** */ func (r *CDBReconciler) manageCDBDeletion(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error { log := r.Log.WithValues("manageCDBDeletion", req.NamespacedName) - // Check if the PDB instance is marked to be deleted, which is - // indicated by the deletion timestamp being set. - isCDBMarkedToBeDeleted := cdb.GetDeletionTimestamp() != nil - if isCDBMarkedToBeDeleted { - log.Info("Marked to be deleted") + /* REGISTER FINALIZER */ + if cdb.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(cdb, CDBFinalizer) { + controllerutil.AddFinalizer(cdb, CDBFinalizer) + if err := r.Update(ctx, cdb); err != nil { + return err + } + } + + } else { + log.Info("cdb set to be deleted") cdb.Status.Phase = cdbPhaseDelete cdb.Status.Status = true r.Status().Update(ctx, cdb) + if controllerutil.ContainsFinalizer(cdb, CDBFinalizer) { - // Run finalization logic for CDBFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - err := r.deleteCDBInstance(ctx, req, cdb) - if err != nil { - log.Info("Could not delete CDB Resource", "CDB Name", cdb.Spec.CDBName, "err", err.Error()) + + if err := r.DeletePDBS(ctx, req, cdb); err != nil { + log.Info("Cannot delete pdbs") return err } - // Remove CDBFinalizer. Once all finalizers have been - // removed, the object will be deleted. 
- log.Info("Removing finalizer") controllerutil.RemoveFinalizer(cdb, CDBFinalizer) - err = r.Update(ctx, cdb) - if err != nil { - log.Info("Could not remove finalizer", "err", err.Error()) + if err := r.Update(ctx, cdb); err != nil { return err } - - log.Info("Successfully removed CDB Resource") - return nil } - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(cdb, CDBFinalizer) { - log.Info("Adding finalizer") - cdb.Status.Phase = cdbPhaseInit - cdb.Status.Status = false - controllerutil.AddFinalizer(cdb, CDBFinalizer) - err := r.Update(ctx, cdb) + err := r.deleteCDBInstance(ctx, req, cdb) if err != nil { - log.Info("Could not add finalizer", "err", err.Error()) + log.Info("Could not delete CDB Resource", "CDB Name", cdb.Spec.CDBName, "err", err.Error()) return err } + } return nil } @@ -852,7 +855,8 @@ func (r *CDBReconciler) deleteCDBInstance(ctx context.Context, req ctrl.Request, /* ************************************************ - Get Secret Key for a Secret Name - /*********************************************** + +/*********************************************** */ func (r *CDBReconciler) verifySecrets(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error { @@ -876,6 +880,9 @@ func (r *CDBReconciler) verifySecrets(ctx context.Context, req ctrl.Request, cdb if err := r.checkSecret(ctx, req, cdb, cdb.Spec.WebServerPwd.Secret.SecretName); err != nil { return err } + if err := r.checkSecret(ctx, req, cdb, cdb.Spec.CDBPriKey.Secret.SecretName); err != nil { + return err + } cdb.Status.Msg = "" log.Info("Verified secrets successfully") @@ -885,7 +892,8 @@ func (r *CDBReconciler) verifySecrets(ctx context.Context, req ctrl.Request, cdb /* ************************************************ - Get Secret Key for a Secret Name - /*********************************************** + +/*********************************************** */ func (r *CDBReconciler) checkSecret(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB, secretName string) 
error { @@ -909,7 +917,8 @@ func (r *CDBReconciler) checkSecret(ctx context.Context, req ctrl.Request, cdb * /* ************************************************ - Delete Secrets - /*********************************************** + +/*********************************************** */ func (r *CDBReconciler) deleteSecrets(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) { @@ -966,11 +975,104 @@ func (r *CDBReconciler) deleteSecrets(ctx context.Context, req ctrl.Request, cdb } } +/* Delete cascade option */ + /* ************************************************************* - SetupWithManager sets up the controller with the Manager. - /************************************************************ +/************************************************************ */ + +func (r *CDBReconciler) DeletePDBS(ctx context.Context, req ctrl.Request, cdb *dbapi.CDB) error { + log := r.Log.WithValues("DeletePDBS", req.NamespacedName) + + /* =================== DELETE CASCADE ================ */ + if cdb.Spec.DeletePDBCascade == true { + log.Info("DELETE PDB CASCADE OPTION") + pdbList := &dbapi.PDBList{} + listOpts := []client.ListOption{} + err := r.List(ctx, pdbList, listOpts...) + if err != nil { + log.Info("Failed to get the list of pdbs") + } + + var url string + if err == nil { + for _, pdbitem := range pdbList.Items { + log.Info("pdbitem.Spec.CDBName : " + pdbitem.Spec.CDBName) + log.Info("pdbitem.Spec.CDBNamespace: " + pdbitem.Spec.CDBNamespace) + log.Info("cdb.Spec.CDBName : " + cdb.Spec.CDBName) + log.Info("cdb.Namespace : " + cdb.Namespace) + if pdbitem.Spec.CDBName == cdb.Spec.CDBName && pdbitem.Spec.CDBNamespace == cdb.Namespace { + fmt.Printf("DeletePDBS Call Delete function for %s %s\n", pdbitem.Name, pdbitem.Spec.PDBName) + + var objmap map[string]interface{} /* Used for the return payload */ + values := map[string]string{ + "state": "CLOSE", + "modifyOption": "IMMEDIATE", + "getScript": "FALSE", + } + + //url := "https://" + pdbitem.Spec.CDBResName + "-cdb." 
+ pdbitem.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/database/pdbs/" + pdbitem.Spec.PDBName + url = "https://" + pdbitem.Spec.CDBResName + "-ords." + pdbitem.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbitem.Spec.PDBName + "/status" + + log.Info("callAPI(URL):" + url) + log.Info("pdbitem.Status.OpenMode" + pdbitem.Status.OpenMode) + + if pdbitem.Status.OpenMode != "MOUNTED" { + + log.Info("Force pdb closure") + respData, errapi := NewCallApi(r, ctx, req, &pdbitem, url, values, "POST") + + fmt.Printf("Debug NEWCALL:%s\n", respData) + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "failed to get respData from callAPI", "err", err.Error()) + return err + } + + if errapi != nil { + log.Error(err, "callAPI cannot close pdb "+pdbitem.Spec.PDBName, "err", err.Error()) + return err + } + + r.Recorder.Eventf(cdb, corev1.EventTypeNormal, "close pdb", "pdbname=%s", pdbitem.Spec.PDBName) + } + + /* start dropping pdb */ + log.Info("Drop pluggable database") + values = map[string]string{ + "action": "INCLUDING", + "getScript": "FALSE", + } + url = "https://" + pdbitem.Spec.CDBResName + "-ords." 
+ pdbitem.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbitem.Spec.PDBName + "/" + respData, errapi := NewCallApi(r, ctx, req, &pdbitem, url, values, "DELETE") + + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "failed to get respData from callAPI", "err", err.Error()) + return err + } + + if errapi != nil { + log.Error(err, "callAPI cannot drop pdb "+pdbitem.Spec.PDBName, "err", err.Error()) + return err + } + r.Recorder.Eventf(cdb, corev1.EventTypeNormal, "drop pdb", "pdbname=%s", pdbitem.Spec.PDBName) + + err = r.Delete(context.Background(), &pdbitem, client.GracePeriodSeconds(0)) + if err != nil { + log.Info("Could not delete PDB resource", "err", err.Error()) + return err + } + + } /* check pdb name */ + } /* end of loop */ + } + + } + /* ================================================ */ + return nil +} + func (r *CDBReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&dbapi.CDB{}). diff --git a/controllers/database/dataguardbroker_controller.go b/controllers/database/dataguardbroker_controller.go deleted file mode 100644 index 9faaefd2..00000000 --- a/controllers/database/dataguardbroker_controller.go +++ /dev/null @@ -1,1199 +0,0 @@ -/* -** Copyright (c) 2023 Oracle and/or its affiliates. 
-** -** The Universal Permissive License (UPL), Version 1.0 -** -** Subject to the condition set forth below, permission is hereby granted to any -** person obtaining a copy of this software, associated documentation and/or data -** (collectively the "Software"), free of charge and under any and all copyright -** rights in the Software, and any and all patent rights owned or freely -** licensable by each licensor hereunder covering either (i) the unmodified -** Software as contributed to or provided by such licensor, or (ii) the Larger -** Works (as defined below), to deal in both -** -** (a) the Software, and -** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if -** one is included with the Software (each a "Larger Work" to which the Software -** is contributed by such licensors), -** -** without restriction, including without limitation the rights to copy, create -** derivative works of, display, perform, and distribute the Software and make, -** use, sell, offer for sale, import, export, have made, and have sold the -** Software and the Larger Work(s), and to sublicense the foregoing rights on -** either these or other terms. -** -** This license is subject to the following condition: -** The above copyright notice and either this complete permission notice or at -** a minimum a reference to the UPL must be included in all copies or -** substantial portions of the Software. -** -** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -** SOFTWARE. 
- */ - -package controllers - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - - dbapi "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - dbcommons "github.com/oracle/oracle-database-operator/commons/database" -) - -// DataguardBrokerReconciler reconciles a DataguardBroker object -type DataguardBrokerReconciler struct { - client.Client - Log logr.Logger - Scheme *runtime.Scheme - Config *rest.Config - Recorder record.EventRecorder -} - -const dataguardBrokerFinalizer = "database.oracle.com/dataguardbrokerfinalizer" - -//+kubebuilder:rbac:groups=database.oracle.com,resources=dataguardbrokers,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=database.oracle.com,resources=dataguardbrokers/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=database.oracle.com,resources=dataguardbrokers/finalizers,verbs=update -//+kubebuilder:rbac:groups="",resources=pods;pods/log;pods/exec;persistentvolumeclaims;services,verbs=create;delete;get;list;patch;update;watch -//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the DataguardBroker object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile -func (r *DataguardBrokerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - - r.Log.Info("Reconcile requested") - - dataguardBroker := &dbapi.DataguardBroker{} - err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Name}, dataguardBroker) - if err != nil { - if apierrors.IsNotFound(err) { - r.Log.Info("Resource deleted") - return requeueN, nil - } - return requeueN, err - } - - // Manage DataguardBroker Deletion - result, err := r.manageDataguardBrokerDeletion(req, ctx, dataguardBroker) - if result.Requeue { - r.Log.Info("Reconcile queued") - return result, err - } - if err != nil { - r.Log.Error(err, err.Error()) - return result, err - } - - // Fetch Primary Database Reference - singleInstanceDatabase := &dbapi.SingleInstanceDatabase{} - err = r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: dataguardBroker.Spec.PrimaryDatabaseRef}, singleInstanceDatabase) - if err != nil { - if apierrors.IsNotFound(err) { - r.Log.Info("Resource deleted") - return requeueN, nil - } - return requeueN, err - } - - /* Initialize Status */ - if dataguardBroker.Status.Status == "" { - dataguardBroker.Status.Status = dbcommons.StatusCreating - dataguardBroker.Status.ExternalConnectString = dbcommons.ValueUnavailable - dataguardBroker.Status.ClusterConnectString = dbcommons.ValueUnavailable - r.Status().Update(ctx, dataguardBroker) - } - - // Always refresh status before a reconcile - defer r.Status().Update(ctx, dataguardBroker) - - // Create Service to point to primary database always - result = r.createSVC(ctx, req, dataguardBroker) - if result.Requeue { - r.Log.Info("Reconcile queued") - return result, nil - } - - // Validate if Primary Database Reference is ready - result, sidbReadyPod, adminPassword := r.validateSidbReadiness(dataguardBroker, singleInstanceDatabase, ctx, 
req) - if result.Requeue { - r.Log.Info("Reconcile queued") - return result, nil - } - - // Setup the DG Configuration - result = r.setupDataguardBrokerConfiguration(dataguardBroker, singleInstanceDatabase, sidbReadyPod, adminPassword, ctx, req) - if result.Requeue { - r.Log.Info("Reconcile queued") - return result, nil - } - - // Set a particular database as primary - result = r.SetAsPrimaryDatabase(singleInstanceDatabase.Spec.Sid, dataguardBroker.Spec.SetAsPrimaryDatabase, dataguardBroker, - singleInstanceDatabase, adminPassword, ctx, req) - if result.Requeue { - r.Log.Info("Reconcile queued") - return result, nil - } - - // If LoadBalancer = true , ensure Connect String is updated - if dataguardBroker.Status.ExternalConnectString == dbcommons.ValueUnavailable { - return requeueY, nil - } - - dataguardBroker.Status.Status = dbcommons.StatusReady - - r.Log.Info("Reconcile completed") - return ctrl.Result{}, nil - -} - -// ##################################################################################################### -// -// Validate Readiness of the primary DB specified -// -// ##################################################################################################### -func (r *DataguardBrokerReconciler) validateSidbReadiness(m *dbapi.DataguardBroker, - n *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (ctrl.Result, corev1.Pod, string) { - - log := r.Log.WithValues("validateSidbReadiness", req.NamespacedName) - adminPassword := "" - // ## FETCH THE SIDB REPLICAS . 
- sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, n.Spec.Image.Version, - n.Spec.Image.PullFrom, n.Name, n.Namespace, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY, sidbReadyPod, adminPassword - } - - if n.Status.Status != dbcommons.StatusReady { - - eventReason := "Waiting" - eventMsg := "Waiting for " + n.Name + " to be Ready" - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - return requeueY, sidbReadyPod, adminPassword - } - - // Validate databaseRef Admin Password - adminPasswordSecret := &corev1.Secret{} - err = r.Get(ctx, types.NamespacedName{Name: n.Spec.AdminPassword.SecretName, Namespace: n.Namespace}, adminPasswordSecret) - if err != nil { - if apierrors.IsNotFound(err) { - //m.Status.Status = dbcommons.StatusError - eventReason := "Waiting" - eventMsg := "waiting for secret : " + n.Spec.AdminPassword.SecretName + " to get created" - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - r.Log.Info("Secret " + n.Spec.AdminPassword.SecretName + " Not Found") - return requeueY, sidbReadyPod, adminPassword - } - log.Error(err, err.Error()) - return requeueY, sidbReadyPod, adminPassword - } - adminPassword = string(adminPasswordSecret.Data[n.Spec.AdminPassword.SecretKey]) - - out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | %s", fmt.Sprintf(dbcommons.ValidateAdminPassword, adminPassword), dbcommons.GetSqlClient(n.Spec.Edition))) - if err != nil { - log.Error(err, err.Error()) - return requeueY, sidbReadyPod, adminPassword - } - if strings.Contains(out, "USER is \"SYS\"") { - log.Info("validated Admin password successfully") - } else if strings.Contains(out, "ORA-01017") { - //m.Status.Status = dbcommons.StatusError - eventReason := "Logon denied" - eventMsg := "invalid databaseRef admin password. 
secret: " + n.Spec.AdminPassword.SecretName - r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) - return requeueY, sidbReadyPod, adminPassword - } else { - return requeueY, sidbReadyPod, adminPassword - } - - return requeueN, sidbReadyPod, adminPassword -} - -// ############################################################################# -// -// Instantiate Service spec from StandbyDatabase spec -// -// ############################################################################# -func (r *DataguardBrokerReconciler) instantiateSVCSpec(m *dbapi.DataguardBroker) *corev1.Service { - svc := &corev1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: m.Name, - Namespace: m.Namespace, - Labels: map[string]string{ - "app": m.Name, - }, - Annotations: func() map[string]string { - annotations := make(map[string]string) - if len(m.Spec.ServiceAnnotations) != 0 { - for key, value := range m.Spec.ServiceAnnotations { - annotations[key] = value - } - } - return annotations - }(), - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: "listener", - Port: 1521, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "xmldb", - Port: 5500, - Protocol: corev1.ProtocolTCP, - }, - }, - Selector: map[string]string{ - "app": m.Name, - }, - Type: corev1.ServiceType(func() string { - if m.Spec.LoadBalancer { - return "LoadBalancer" - } - return "NodePort" - }()), - }, - } - // Set StandbyDatabase instance as the owner and controller - ctrl.SetControllerReference(m, svc, r.Scheme) - return svc -} - -// ############################################################################# -// -// Create a Service for StandbyDatabase -// -// ############################################################################# -func (r *DataguardBrokerReconciler) createSVC(ctx context.Context, req ctrl.Request, - m *dbapi.DataguardBroker) ctrl.Result { - - log := r.Log.WithValues("createSVC", req.NamespacedName) - // Check 
if the Service already exists, if not create a new one - svc := &corev1.Service{} - // Get retrieves an obj for the given object key from the Kubernetes Cluster. - // obj must be a struct pointer so that obj can be updated with the response returned by the Server. - // Here foundsvc is the struct pointer to corev1.Service{} - err := r.Get(ctx, types.NamespacedName{Name: m.Name, Namespace: m.Namespace}, svc) - if err != nil && apierrors.IsNotFound(err) { - // Define a new Service - svc = r.instantiateSVCSpec(m) - log.Info("Creating a new Service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) - err = r.Create(ctx, svc) - //err = r.Update(ctx, svc) - if err != nil { - log.Error(err, "Failed to create new Service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) - return requeueY - } else { - timeout := 30 - // Waiting for Service to get created as sometimes it takes some time to create a service . 30 seconds TImeout - err = dbcommons.WaitForStatusChange(r, svc.Name, m.Namespace, ctx, req, time.Duration(timeout)*time.Second, "svc", "creation") - if err != nil { - log.Error(err, "Error in Waiting for svc status for Creation", "svc.Namespace", svc.Namespace, "SVC.Name", svc.Name) - return requeueY - } - log.Info("Succesfully Created New Service ", "Service.Name : ", svc.Name) - } - time.Sleep(10 * time.Second) - - } else if err != nil { - log.Error(err, "Failed to get Service") - return requeueY - } else if err == nil { - log.Info(" ", "Found Existing Service ", svc.Name) - } - - // update service status - log.Info("Updating the service status...") - m.Status.ClusterConnectString = svc.Name + "." 
+ svc.Namespace + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/DATAGUARD" - if m.Spec.LoadBalancer { - if len(svc.Status.LoadBalancer.Ingress) > 0 { - lbAddress := svc.Status.LoadBalancer.Ingress[0].Hostname - if lbAddress == "" { - lbAddress = svc.Status.LoadBalancer.Ingress[0].IP - } - m.Status.ExternalConnectString = lbAddress + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/DATAGUARD" - } - } else { - nodeip := dbcommons.GetNodeIp(r, ctx, req) - if nodeip != "" { - m.Status.ExternalConnectString = nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/DATAGUARD" - } - } - r.Status().Update(ctx, m) - - return requeueN -} - -// ############################################################################# -// -// Setup the requested DG Configuration -// -// ############################################################################# -func (r *DataguardBrokerReconciler) setupDataguardBrokerConfiguration(m *dbapi.DataguardBroker, n *dbapi.SingleInstanceDatabase, - sidbReadyPod corev1.Pod, adminPassword string, ctx context.Context, req ctrl.Request) ctrl.Result { - log := r.Log.WithValues("setupDataguardBrokerConfiguration", req.NamespacedName) - - databases, _, err := dbcommons.GetDatabasesInDgConfig(sidbReadyPod, r, r.Config, ctx, req) - dbSet := make(map[string]struct{}) - if err != nil { - if err.Error() != "databases in DG config is nil" { - return requeueY - } - } - if len(databases) > 0 { - log.Info("Databases in DG config are :") - for i := 0; i < len(databases); i++ { - log.Info(strings.Split(databases[i], ":")[0]) - dbSet[strings.ToUpper(strings.Split(databases[i], ":")[0])] = struct{}{} - } - } - - for i := 0; i < len(m.Spec.StandbyDatabaseRefs); i++ { - - standbyDatabase := &dbapi.SingleInstanceDatabase{} - err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: m.Spec.StandbyDatabaseRefs[i]}, standbyDatabase) - if err != nil { - if apierrors.IsNotFound(err) { - eventReason := "Warning" - eventMsg := m.Spec.StandbyDatabaseRefs[i] + "not 
found" - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - continue - } - log.Error(err, err.Error()) - return requeueY - } - - // Check if dataguard broker is already configured for the standby database - if standbyDatabase.Status.DgBrokerConfigured { - log.Info("Dataguard broker for standbyDatabase : " + standbyDatabase.Name + " is already configured") - continue - } - _, ok := dbSet[standbyDatabase.Status.Sid] - if ok { - log.Info("A database with the same SID is already configured in the DG") - r.Recorder.Eventf(m, corev1.EventTypeWarning, "Spec Error", "A database with the same SID "+standbyDatabase.Status.Sid+" is already configured in the DG") - continue - } - - m.Status.Status = dbcommons.StatusCreating - r.Status().Update(ctx, m) - - // ## FETCH THE STANDBY REPLICAS . - standbyDatabaseReadyPod, _, _, _, err := dbcommons.FindPods(r, n.Spec.Image.Version, - n.Spec.Image.PullFrom, standbyDatabase.Name, standbyDatabase.Namespace, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - - if standbyDatabase.Status.Status != dbcommons.StatusReady { - - eventReason := "Waiting" - eventMsg := "Waiting for " + standbyDatabase.Name + " to be Ready" - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - return requeueY - - } - - result := r.setupDataguardBrokerConfigurationForGivenDB(m, n, standbyDatabase, standbyDatabaseReadyPod, sidbReadyPod, ctx, req, adminPassword) - if result.Requeue { - return result - } - - // Update Databases - r.updateReconcileStatus(m, sidbReadyPod, ctx, req) - } - - eventReason := "DG Configuration up to date" - eventMsg := "" - - // Patch DataguardBroker Service to point selector to Current Primary Name - result := r.patchService(m, sidbReadyPod, n, ctx, req) - if result.Requeue { - return result - } - - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - - return requeueN -} - -// ############################################################################# 
-// -// Patch DataguardBroker Service to point selector to Current Primary Name -// -// ############################################################################# -func (r *DataguardBrokerReconciler) patchService(m *dbapi.DataguardBroker, sidbReadyPod corev1.Pod, n *dbapi.SingleInstanceDatabase, - ctx context.Context, req ctrl.Request) ctrl.Result { - log := r.Log.WithValues("patchService", req.NamespacedName) - databases, out, err := dbcommons.GetDatabasesInDgConfig(sidbReadyPod, r, r.Config, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - if !strings.Contains(out, "ORA-") { - primarySid := strings.ToUpper(dbcommons.GetPrimaryDatabase(databases)) - primaryName := n.Name - if primarySid != n.Spec.Sid { - primaryName = n.Status.StandbyDatabases[primarySid] - } - - // Patch DataguardBroker Service to point selector to Current Primary Name - svc := &corev1.Service{} - err = r.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, svc) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - svc.Spec.Selector["app"] = primaryName - err = r.Update(ctx, svc) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - - m.Status.ClusterConnectString = svc.Name + "." 
+ svc.Namespace + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/DATAGUARD" - if m.Spec.LoadBalancer { - if len(svc.Status.LoadBalancer.Ingress) > 0 { - lbAddress := svc.Status.LoadBalancer.Ingress[0].Hostname - if lbAddress == "" { - lbAddress = svc.Status.LoadBalancer.Ingress[0].IP - } - m.Status.ExternalConnectString = lbAddress + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/DATAGUARD" - } - } else { - nodeip := dbcommons.GetNodeIp(r, ctx, req) - if nodeip != "" { - m.Status.ExternalConnectString = nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/DATAGUARD" - } - } - } - return requeueN -} - -// ############################################################################# -// -// Set up DG Configuration for a given StandbyDatabase -// -// ############################################################################# -func (r *DataguardBrokerReconciler) setupDataguardBrokerConfigurationForGivenDB(m *dbapi.DataguardBroker, n *dbapi.SingleInstanceDatabase, standbyDatabase *dbapi.SingleInstanceDatabase, - standbyDatabaseReadyPod corev1.Pod, sidbReadyPod corev1.Pod, ctx context.Context, req ctrl.Request, adminPassword string) ctrl.Result { - - log := r.Log.WithValues("setupDataguardBrokerConfigurationForGivenDB", req.NamespacedName) - - if standbyDatabaseReadyPod.Name == "" || sidbReadyPod.Name == "" { - return requeueY - } - - // ## CHECK IF DG CONFIGURATION AVAILABLE IN PRIMARY DATABSE## - out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba ", dbcommons.DBShowConfigCMD)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("ShowConfiguration Output") - log.Info(out) - - if strings.Contains(out, "ORA-16525") { - log.Info("ORA-16525: The Oracle Data Guard broker is not yet available on Primary") - return requeueY - } - - _, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, 
standbyDatabaseReadyPod.Namespace, "", ctx, req, true, "bash", "-c", - fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DB Admin pwd file created") - - // ORA-16532: Oracle Data Guard broker configuration does not exist , so create one - if strings.Contains(out, "ORA-16532") { - if m.Spec.ProtectionMode == "MaxPerformance" { - // Construct the password file and dgbroker command file - out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerMaxPerformanceCMD)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DGMGRL command file creation output") - log.Info(out) - - // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAXPERFORMANCE ## - out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - "dgmgrl sys@${PRIMARY_DB_CONN_STR} @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd") - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DgConfigurationMaxPerformance Output") - log.Info(out) - } else if m.Spec.ProtectionMode == "MaxAvailability" { - // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAX AVAILABILITY ## - out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerMaxAvailabilityCMD)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DGMGRL command file creation output") - log.Info(out) - - // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAXPERFORMANCE ## - out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, 
req, false, "bash", "-c", - "dgmgrl sys@${PRIMARY_DB_CONN_STR} @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd") - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DgConfigurationMaxAvailability Output") - log.Info(out) - } else { - log.Info("SPECIFY correct Protection Mode . Either MaxAvailability or MaxPerformance") - return requeueY - } - - // ## SHOW CONFIGURATION DG - out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba ", dbcommons.DBShowConfigCMD)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } else { - log.Info("ShowConfiguration Output") - log.Info(out) - } - // Set DG Configured status to true for this standbyDatabase and primary Database. so that in next reconcilation, we dont configure this again - n.Status.DgBrokerConfigured = true - standbyDatabase.Status.DgBrokerConfigured = true - r.Status().Update(ctx, standbyDatabase) - r.Status().Update(ctx, n) - // Remove admin pwd file - _, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, true, "bash", "-c", - dbcommons.RemoveAdminPasswordFile) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DB Admin pwd file removed") - - return requeueN - } - - // DG Configuration Exists . 
So add the standbyDatabase to the existing DG Configuration - databases, _, err := dbcommons.GetDatabasesInDgConfig(sidbReadyPod, r, r.Config, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - - // ## ADD DATABASE TO DG CONFIG , IF NOT PRESENT - found, _ := dbcommons.IsDatabaseFound(standbyDatabase.Spec.Sid, databases, "") - if found { - return requeueN - } - primarySid := dbcommons.GetPrimaryDatabase(databases) - - // If user adds a new standby to a dg config when failover happened to one ot the standbys, we need to have current primary connect string - primaryConnectString := n.Name + ":1521/" + primarySid - if !strings.EqualFold(primarySid, n.Spec.Sid) { - primaryConnectString = n.Status.StandbyDatabases[primarySid] + ":1521/" + primarySid - } - - if m.Spec.ProtectionMode == "MaxPerformance" { - // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAXPERFORMANCE ## - out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerAddDBMaxPerformanceCMD)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DGMGRL command file creation output") - log.Info(out) - - out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("dgmgrl sys@%s @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd ", primaryConnectString)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DgConfigurationMaxPerformance Output") - log.Info(out) - - } else if m.Spec.ProtectionMode == "MaxAvailability" { - // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAX AVAILABILITY ## - out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - 
fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerAddDBMaxAvailabilityCMD)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DGMGRL command file creation output") - log.Info(out) - - out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("dgmgrl sys@%s @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd ", primaryConnectString)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DgConfigurationMaxAvailability Output") - log.Info(out) - - } else { - log.Info("SPECIFY correct Protection Mode . Either MaxAvailability or MaxPerformance") - log.Error(err, err.Error()) - return requeueY - } - - databases, _, err = dbcommons.GetDatabasesInDgConfig(sidbReadyPod, r, r.Config, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - - // ## SET PROPERTY FASTSTARTFAILOVERTARGET FOR EACH DATABASE TO ALL OTHER DATABASES IN DG CONFIG . 
- if m.Spec.FastStartFailOver.Enable { - for i := 0; i < len(databases); i++ { - out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("dgmgrl sys@%s \"EDIT DATABASE %s SET PROPERTY FASTSTARTFAILOVERTARGET=%s\"< admin.pwd", primaryConnectString, - strings.Split(databases[i], ":")[0], getFSFOTargets(i, databases))) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("SETTING FSFO TARGET OUTPUT") - log.Info(out) - - out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("dgmgrl sys@%s \"SHOW DATABASE %s FASTSTARTFAILOVERTARGET\" < admin.pwd", primaryConnectString, strings.Split(databases[i], ":")[0])) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("FSFO TARGETS OF " + databases[i]) - log.Info(out) - - } - } - // Remove admin pwd file - _, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, true, "bash", "-c", - dbcommons.RemoveAdminPasswordFile) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DB Admin pwd file removed") - - // Set DG Configured status to true for this standbyDatabase. 
so that in next reconcilation, we dont configure this again - standbyDatabase.Status.DgBrokerConfigured = true - r.Status().Update(ctx, standbyDatabase) - - return requeueN -} - -// ############################################################################# -// -// Remove a Database from DG Configuration -// -// ############################################################################# -// -//lint:ignore U1000 deferred for next release -func (r *DataguardBrokerReconciler) removeDatabaseFromDGConfig(m *dbapi.DataguardBroker, n *dbapi.SingleInstanceDatabase, standbyDatabase *dbapi.SingleInstanceDatabase, standbyDatabaseReadyPod corev1.Pod, sidbReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) ctrl.Result { - log := r.Log.WithValues("removeDataguardBrokerConfigurationForGivenDB", req.NamespacedName) - - if standbyDatabaseReadyPod.Name == "" || sidbReadyPod.Name == "" { - return requeueY - } - - // ## CHECK IF DG CONFIGURATION IS AVAILABLE IN PRIMARY DATABASE ## - out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba", dbcommons.DBShowConfigCMD)) - - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("Showconfiguration Output") - log.Info(out) - - if strings.Contains(out, "ORA-16525") { - log.Info("ORA-16525: The Oracle Data Guard broker is not yet available on Primary") - return requeueY - } - - // ## REMOVING STANDBY DATABASE FROM DG CONFIGURATION ## - _, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.RemoveStandbyDBFromDGConfgCMD)) - - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - - // ## SHOW CONFIGURATION - _, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e 
\"%s\" | dgmgrl / as sysdba", dbcommons.DBShowConfigCMD)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("Showconfiguration Output") - log.Info(out) - // Set DG Configured status to false for this standbyDatabase. so that in next reconcilation, we dont configure this again - standbyDatabase.Status.DgBrokerConfigured = false - r.Status().Update(ctx, standbyDatabase) - - return requeueN -} - -// ############################################################################# -// -// Return FSFO targets of each StandbyDatabase -// Concatenation of all strings in databases slice expecting that of index 1 -// -// ############################################################################# -func getFSFOTargets(index int, databases []string) string { - fsfotargets := "" - for i := 0; i < len(databases); i++ { - if i != index { - splitstr := strings.Split(databases[i], ":") - if fsfotargets == "" { - fsfotargets = splitstr[0] - } else { - fsfotargets = fsfotargets + "," + splitstr[0] - } - } - } - return fsfotargets -} - -// ##################################################################################################### -// -// Switchovers to 'sid' db to make 'sid' db primary -// -// ##################################################################################################### -func (r *DataguardBrokerReconciler) SetAsPrimaryDatabase(sidbSid string, targetSid string, m *dbapi.DataguardBroker, n *dbapi.SingleInstanceDatabase, - adminPassword string, ctx context.Context, req ctrl.Request) ctrl.Result { - - log := r.Log.WithValues("SetAsPrimaryDatabase", req.NamespacedName) - if targetSid == "" { - log.Info("Specified sid is nil") - return requeueN - } - - // Fetch the SIDB Ready Pod - sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, n.Spec.Image.Version, - (n.Spec.Image.PullFrom), n.Name, n.Namespace, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - - // Fetch databases in dataguard broker 
configuration - databases, _, err := dbcommons.GetDatabasesInDgConfig(sidbReadyPod, r, r.Config, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - - dbInDgConfig := false - for i := 0; i < len(databases); i++ { - splitstr := strings.Split(databases[i], ":") - if strings.ToUpper(splitstr[0]) == strings.ToUpper(targetSid) { - dbInDgConfig = true - break - } - } - - if !dbInDgConfig { - eventReason := "Cannot Switchover" - eventMsg := fmt.Sprintf("Database %s not a part of the dataguard configuration", targetSid) - r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) - return requeueN - } - - // Fetch the current Primary database - primarySid := dbcommons.GetPrimaryDatabase(databases) - if strings.EqualFold(primarySid, targetSid) { - log.Info(targetSid + " is already Primary") - return requeueN - } - - m.Status.Status = dbcommons.StatusUpdating - r.Status().Update(ctx, m) - - found, _ := dbcommons.IsDatabaseFound(targetSid, databases, "") - if !found { - log.Info(targetSid + " not yet set in DG config") - return requeueY - } - - // Fetch the PrimarySid Ready Pod to create chk file - var primaryReq ctrl.Request - var primaryReadyPod corev1.Pod - if !strings.EqualFold(primarySid, sidbSid) { - primaryReq = ctrl.Request{ - NamespacedName: types.NamespacedName{ - Namespace: req.Namespace, - Name: n.Status.StandbyDatabases[strings.ToUpper(primarySid)], - }, - } - primaryReadyPod, _, _, _, err = dbcommons.FindPods(r, "", "", primaryReq.Name, primaryReq.Namespace, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - } else { - primaryReadyPod = sidbReadyPod - } - - // Fetch the targetSid Ready Pod to create chk file - var targetReq ctrl.Request - var targetReadyPod corev1.Pod - if !strings.EqualFold(targetSid, sidbSid) { - targetReq = ctrl.Request{ - NamespacedName: types.NamespacedName{ - Namespace: req.Namespace, - Name: n.Status.StandbyDatabases[strings.ToUpper(targetSid)], - }, - } - 
targetReadyPod, _, _, _, err = dbcommons.FindPods(r, "", "", targetReq.Name, targetReq.Namespace, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - } else { - targetReadyPod = sidbReadyPod - } - - // Create a chk File so that no other pods take the lock during Switchover . - out, err := dbcommons.ExecCommand(r, r.Config, primaryReadyPod.Name, primaryReadyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.CreateChkFileCMD) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("Successfully Created chk file " + out) - out, err = dbcommons.ExecCommand(r, r.Config, targetReadyPod.Name, targetReadyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.CreateChkFileCMD) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("Successfully Created chk file " + out) - - eventReason := "Waiting" - eventMsg := "Switchover In Progress" - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - - // Connect to 'primarySid' db using dgmgrl and switchover to 'targetSid' db to make 'targetSid' db primary - _, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", - fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("DB Admin pwd file created") - - out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("dgmgrl sys@%s \"SWITCHOVER TO %s\" < admin.pwd", primarySid, targetSid)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("SWITCHOVER TO " + targetSid + " Output") - log.Info(out) - - //Delete pwd file - _, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", - dbcommons.RemoveAdminPasswordFile) - if err != nil { - log.Error(err, err.Error()) 
- return requeueY - } - log.Info("DB Admin pwd file removed") - - eventReason = "Success" - eventMsg = "Switchover Completed Successfully" - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - - // Remove the chk File . - _, err = dbcommons.ExecCommand(r, r.Config, primaryReadyPod.Name, primaryReadyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.RemoveChkFileCMD) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - out, err = dbcommons.ExecCommand(r, r.Config, targetReadyPod.Name, targetReadyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.RemoveChkFileCMD) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - log.Info("Successfully Removed chk file " + out) - - // Update Databases - r.updateReconcileStatus(m, sidbReadyPod, ctx, req) - - // Update status of Primary true/false on 'primary' db (From which switchover initiated) - if !strings.EqualFold(primarySid, sidbSid) { - - standbyDatabase := &dbapi.SingleInstanceDatabase{} - err = r.Get(ctx, primaryReq.NamespacedName, standbyDatabase) - if err != nil { - return requeueN - } - out, err := dbcommons.GetDatabaseRole(primaryReadyPod, r, r.Config, ctx, primaryReq) - if err == nil { - standbyDatabase.Status.Role = strings.ToUpper(out) - } - r.Status().Update(ctx, standbyDatabase) - - } else { - sidbReq := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Namespace: req.Namespace, - Name: n.Name, - }, - } - out, err := dbcommons.GetDatabaseRole(sidbReadyPod, r, r.Config, ctx, sidbReq) - if err == nil { - n.Status.Role = strings.ToUpper(out) - } - r.Status().Update(ctx, n) - } - - // Update status of Primary true/false on 'sid' db (To which switchover initiated) - if !strings.EqualFold(targetSid, sidbSid) { - - standbyDatabase := &dbapi.SingleInstanceDatabase{} - err = r.Get(ctx, targetReq.NamespacedName, standbyDatabase) - if err != nil { - return requeueN - } - out, err := dbcommons.GetDatabaseRole(targetReadyPod, r, r.Config, ctx, 
targetReq) - if err == nil { - standbyDatabase.Status.Role = strings.ToUpper(out) - } - r.Status().Update(ctx, standbyDatabase) - - } else { - sidbReq := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Namespace: req.Namespace, - Name: n.Name, - }, - } - out, err := dbcommons.GetDatabaseRole(sidbReadyPod, r, r.Config, ctx, sidbReq) - if err == nil { - n.Status.Role = strings.ToUpper(out) - } - r.Status().Update(ctx, n) - } - - // Patch DataguardBroker Service to point selector to Current Primary Name and updates client db connection strings on dataguardBroker - result := r.patchService(m, sidbReadyPod, n, ctx, req) - if result.Requeue { - return result - } - - return requeueN -} - -// ############################################################################# -// -// Update Reconcile Status -// -// ############################################################################# -func (r *DataguardBrokerReconciler) updateReconcileStatus(m *dbapi.DataguardBroker, sidbReadyPod corev1.Pod, - ctx context.Context, req ctrl.Request) (err error) { - - // ConnectStrings updated in PatchService() - var databases []string - databases, _, err = dbcommons.GetDatabasesInDgConfig(sidbReadyPod, r, r.Config, ctx, req) - if err == nil { - primaryDatabase := "" - standbyDatabases := "" - for i := 0; i < len(databases); i++ { - splitstr := strings.Split(databases[i], ":") - if strings.ToUpper(splitstr[1]) == "PRIMARY" { - primaryDatabase = strings.ToUpper(splitstr[0]) - } - if strings.ToUpper(splitstr[1]) == "PHYSICAL_STANDBY" { - if standbyDatabases != "" { - standbyDatabases += "," + strings.ToUpper(splitstr[0]) - } else { - standbyDatabases = strings.ToUpper(splitstr[0]) - } - } - } - m.Status.PrimaryDatabase = primaryDatabase - m.Status.StandbyDatabases = standbyDatabases - } - - m.Status.PrimaryDatabaseRef = m.Spec.PrimaryDatabaseRef - m.Status.ProtectionMode = m.Spec.ProtectionMode - return -} - -// 
############################################################################# -// -// Manage Finalizer to cleanup before deletion of DataguardBroker -// -// ############################################################################# -func (r *DataguardBrokerReconciler) manageDataguardBrokerDeletion(req ctrl.Request, ctx context.Context, m *dbapi.DataguardBroker) (ctrl.Result, error) { - log := r.Log.WithValues("manageDataguardBrokerDeletion", req.NamespacedName) - - // Check if the DataguardBroker instance is marked to be deleted, which is - // indicated by the deletion timestamp being set. - isDataguardBrokerMarkedToBeDeleted := m.GetDeletionTimestamp() != nil - if isDataguardBrokerMarkedToBeDeleted { - if controllerutil.ContainsFinalizer(m, dataguardBrokerFinalizer) { - // Run finalization logic for dataguardBrokerFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - result, err := r.cleanupDataguardBroker(req, ctx, m) - if result.Requeue { - return result, err - } - - // Remove dataguardBrokerFinalizer. Once all finalizers have been - // removed, the object will be deleted. 
- controllerutil.RemoveFinalizer(m, dataguardBrokerFinalizer) - err = r.Update(ctx, m) - if err != nil { - log.Error(err, err.Error()) - return requeueY, err - } - } - return requeueY, errors.New("deletion pending") - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(m, dataguardBrokerFinalizer) { - controllerutil.AddFinalizer(m, dataguardBrokerFinalizer) - err := r.Update(ctx, m) - if err != nil { - log.Error(err, err.Error()) - return requeueY, err - } - } - return requeueN, nil -} - -// ############################################################################# -// -// Finalization logic for DataguardBrokerFinalizer -// -// ############################################################################# -func (r *DataguardBrokerReconciler) cleanupDataguardBroker(req ctrl.Request, ctx context.Context, m *dbapi.DataguardBroker) (ctrl.Result, error) { - log := r.Log.WithValues("cleanupDataguardBroker", req.NamespacedName) - - // Fetch Primary Database Reference - singleInstanceDatabase := &dbapi.SingleInstanceDatabase{} - err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: m.Spec.PrimaryDatabaseRef}, singleInstanceDatabase) - if err != nil { - if apierrors.IsNotFound(err) { - r.Log.Info("Resource deleted. 
No need to remove dataguard configuration") - return requeueN, nil - } - return requeueY, err - } - - // Validate if Primary Database Reference is ready - result, sidbReadyPod, _ := r.validateSidbReadiness(m, singleInstanceDatabase, ctx, req) - if result.Requeue { - r.Log.Info("Reconcile queued") - return result, nil - } - - // Get Primary database to remove dataguard configuration - _, _, err = dbcommons.GetDatabasesInDgConfig(sidbReadyPod, r, r.Config, ctx, req) - if err != nil { - log.Error(err, err.Error()) - return requeueY, err - } - - //primarySid := dbcommons.GetPrimaryDatabase(databases) - - out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", - fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba ", dbcommons.RemoveDataguardConfiguration)) - if err != nil { - log.Error(err, err.Error()) - return requeueY, err - } - log.Info("RemoveDataguardConfiguration Output") - log.Info(out) - - for i := 0; i < len(m.Spec.StandbyDatabaseRefs); i++ { - - standbyDatabase := &dbapi.SingleInstanceDatabase{} - err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: m.Spec.StandbyDatabaseRefs[i]}, standbyDatabase) - if err != nil { - if apierrors.IsNotFound(err) { - continue - } - log.Error(err, err.Error()) - return requeueY, err - } - - // Set DgBrokerConfigured to false - standbyDatabase.Status.DgBrokerConfigured = false - r.Status().Update(ctx, standbyDatabase) - } - - singleInstanceDatabase.Status.DgBrokerConfigured = false - r.Status().Update(ctx, singleInstanceDatabase) - - log.Info("Successfully cleaned up Dataguard Broker") - return requeueN, nil -} - -// ############################################################################# -// -// SetupWithManager sets up the controller with the Manager -// -// ############################################################################# -func (r *DataguardBrokerReconciler) SetupWithManager(mgr ctrl.Manager) error { - return 
ctrl.NewControllerManagedBy(mgr). - For(&dbapi.DataguardBroker{}). - Owns(&corev1.Pod{}). //Watch for deleted pods of DataguardBroker Owner - WithEventFilter(dbcommons.ResourceEventHandler()). - WithOptions(controller.Options{MaxConcurrentReconciles: 100}). //ReconcileHandler is never invoked concurrently with the same object. - Complete(r) -} diff --git a/controllers/database/dbcssystem_controller.go b/controllers/database/dbcssystem_controller.go index 003be62a..1fd94dde 100644 --- a/controllers/database/dbcssystem_controller.go +++ b/controllers/database/dbcssystem_controller.go @@ -1,5 +1,5 @@ /* -** Copyright (c) 2022 Oracle and/or its affiliates. +** Copyright (c) 2022-2024 Oracle and/or its affiliates. ** ** The Universal Permissive License (UPL), Version 1.0 ** @@ -40,26 +40,35 @@ package controllers import ( "context" + "fmt" "reflect" + "strings" + "time" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" dbcsv1 "github.com/oracle/oracle-database-operator/commons/dbcssystem" "github.com/oracle/oracle-database-operator/commons/finalizer" "github.com/oracle/oracle-database-operator/commons/oci" "github.com/go-logr/logr" + "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/core" "github.com/oracle/oci-go-sdk/v65/database" + "github.com/oracle/oci-go-sdk/v65/keymanagement" "github.com/oracle/oci-go-sdk/v65/workrequests" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // 
DbcsSystemReconciler reconciles a DbcsSystem object @@ -74,7 +83,7 @@ type DbcsSystemReconciler struct { Recorder record.EventRecorder } -//+kubebuilder:rbac:groups=database.oracle.com,resources=dbcssystems,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=database.oracle.com,resources=dbcssystems,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=database.oracle.com,resources=dbcssystems/status,verbs=get;update;patch // +kubebuilder:rbac:groups=database.oracle.com,resources=dbcssystems/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=configmaps;secrets;namespaces,verbs=get;list;watch;create;update;patch;delete @@ -91,12 +100,13 @@ type DbcsSystemReconciler struct { func (r *DbcsSystemReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Logger = log.FromContext(ctx) - // your logic here - - //r.Logger = r.Logv1.WithValues("Instance.Namespace", req.NamespacedName) var err error + resultNq := ctrl.Result{Requeue: false} + resultQ := ctrl.Result{Requeue: true, RequeueAfter: 60 * time.Second} + // Get the dbcs instance from the cluster - dbcsInst := &databasev1alpha1.DbcsSystem{} + dbcsInst := &databasev4.DbcsSystem{} + r.Logger.Info("Reconciling DbSystemDetails", "name", req.NamespacedName) if err := r.KubeClient.Get(context.TODO(), req.NamespacedName, dbcsInst); err != nil { if !errors.IsNotFound(err) { @@ -105,28 +115,35 @@ func (r *DbcsSystemReconciler) Reconcile(ctx context.Context, req ctrl.Request) } // Create oci-go-sdk client - authData := oci.APIKeyAuth{ - ConfigMapName: &dbcsInst.Spec.OCIConfigMap, - SecretName: &dbcsInst.Spec.OCISecret, + authData := oci.ApiKeyAuth{ + ConfigMapName: dbcsInst.Spec.OCIConfigMap, + SecretName: dbcsInst.Spec.OCISecret, Namespace: dbcsInst.GetNamespace(), } - provider, err := oci.GetOCIProvider(r.KubeClient, authData) + provider, err := oci.GetOciProvider(r.KubeClient, authData) if err != nil { - return 
ctrl.Result{}, err + result := resultNq + return result, err } r.dbClient, err = database.NewDatabaseClientWithConfigurationProvider(provider) if err != nil { - return ctrl.Result{}, err + result := resultNq + return result, err } r.nwClient, err = core.NewVirtualNetworkClientWithConfigurationProvider(provider) if err != nil { - return ctrl.Result{}, err + result := resultNq + return result, err } r.wrClient, err = workrequests.NewWorkRequestClientWithConfigurationProvider(provider) + if err != nil { + result := resultNq + return result, err + } r.Logger.Info("OCI provider configured succesfully") /* @@ -148,111 +165,372 @@ func (r *DbcsSystemReconciler) Reconcile(ctx context.Context, req ctrl.Request) if err := dbcsv1.DeleteDbcsSystemSystem(r.dbClient, *dbcsInst.Spec.Id); err != nil { r.Logger.Error(err, "Fail to terminate DbcsSystem Instance") // Change the status to Failed - if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev1alpha1.Terminate, r.nwClient, r.wrClient); statusErr != nil { - return ctrl.Result{}, statusErr + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Terminate, r.nwClient, r.wrClient); statusErr != nil { + result := resultNq + return result, err } // The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue - return ctrl.Result{}, nil + result := resultNq + return result, err + } + + // Check if PDBConfig is defined + pdbConfigs := dbcsInst.Spec.PdbConfigs + for _, pdbConfig := range pdbConfigs { + if pdbConfig.PdbName != nil { + // Handle PDB deletion if PluggableDatabaseId is defined and isDelete is true + if pdbConfig.IsDelete != nil && pdbConfig.PluggableDatabaseId != nil && *pdbConfig.IsDelete { + // Call deletePluggableDatabase function + dbSystemId := *dbcsInst.Spec.Id + if err := r.deletePluggableDatabase(ctx, pdbConfig, dbSystemId); err != nil { + result := resultNq + return result, err + } + result := 
resultNq + return result, err + } + } } + // Remove the finalizer and update the object finalizer.Unregister(r.KubeClient, dbcsInst) r.Logger.Info("Finalizer unregistered successfully.") // Stop reconciliation as the item is being deleted - return ctrl.Result{}, nil + result := resultNq + return result, err } /* Determine whether it's a provision or bind operation */ - lastSucSpec, err := dbcsInst.GetLastSuccessfulSpec() + lastSuccessfullSpec, err := dbcsInst.GetLastSuccessfulSpec() + if err != nil { + return ctrl.Result{}, err + } + lastSuccessfullKMSConfig, err := dbcsInst.GetLastSuccessfulKMSConfig() + if err != nil { + return ctrl.Result{}, err + } + lastSuccessfullKMSStatus, err := dbcsInst.GetLastSuccessfulKMSStatus() if err != nil { return ctrl.Result{}, err } - if dbcsInst.Spec.Id == nil && lastSucSpec == nil { - // If no DbcsSystem ID specified, create a DB System - // ======================== Validate Specs ============== - err = dbcsv1.ValidateSpex(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.Recorder) - if err != nil { - return ctrl.Result{}, err - } - r.Logger.Info("DbcsSystem DBSystem provisioning") - dbcsID, err := dbcsv1.CreateAndGetDbcsId(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient) - if err != nil { - r.Logger.Error(err, "Fail to provision and get DbcsSystem System ID") + if lastSuccessfullKMSConfig == nil && lastSuccessfullKMSStatus == nil { - // Change the status to Failed - if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev1alpha1.Failed, r.nwClient, r.wrClient); statusErr != nil { - return ctrl.Result{}, statusErr + if dbcsInst.Spec.KMSConfig.KeyName != "" { + + kmsVaultClient, err := keymanagement.NewKmsVaultClientWithConfigurationProvider(provider) + + if err != nil { + return ctrl.Result{}, err } - // The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue - return ctrl.Result{}, nil - } - 
assignDBCSID(dbcsInst, dbcsID) - if err := dbcsv1.UpdateDbcsSystemId(r.KubeClient, dbcsInst); err != nil { - // Change the status to Failed - assignDBCSID(dbcsInst, dbcsID) - if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev1alpha1.Failed, r.nwClient, r.wrClient); statusErr != nil { - return ctrl.Result{}, statusErr + // Determine the criteria to identify or locate the vault based on provided information + // Example: Using displayName as a unique identifier (assumed to be unique in this context) + displayName := dbcsInst.Spec.KMSConfig.VaultName + + // Check if a vault with the given displayName exists + getVaultReq := keymanagement.ListVaultsRequest{ + CompartmentId: &dbcsInst.Spec.KMSConfig.CompartmentId, // Assuming compartment ID is known or provided } - return ctrl.Result{}, err - } - r.Logger.Info("DbcsSystem system provisioned succesfully") - assignDBCSID(dbcsInst, dbcsID) - if err := dbcsInst.UpdateLastSuccessfulSpec(r.KubeClient); err != nil { - return ctrl.Result{}, err + listResp, err := kmsVaultClient.ListVaults(ctx, getVaultReq) + if err != nil { + return ctrl.Result{}, fmt.Errorf("error listing vaults: %v", err) + } + + var existingVaultId *string + var existingVaultManagementEndpoint *string + var kmsClient keymanagement.KmsManagementClient + // Find the first active vault with matching displayName + for _, vault := range listResp.Items { + if vault.LifecycleState == keymanagement.VaultSummaryLifecycleStateActive && *vault.DisplayName == displayName { + existingVaultId = vault.Id + existingVaultManagementEndpoint = vault.ManagementEndpoint + // Create KMS Management client + kmsClient, err = keymanagement.NewKmsManagementClientWithConfigurationProvider(provider, *existingVaultManagementEndpoint) + if err != nil { + return ctrl.Result{}, err + } + break + } + } + + // If no active vault found, create a new one + if existingVaultId == nil { + + // Create the KMS vault + createResp, err := r.createKMSVault(ctx, 
&dbcsInst.Spec.KMSConfig, kmsClient, &dbcsInst.Status.KMSDetailsStatus) + if err != nil { + return ctrl.Result{}, fmt.Errorf("error creating vault: %v", err) + } + existingVaultId = createResp.Id + r.Logger.Info("Created vault Id", existingVaultId) + } else { + // Optionally, perform additional checks or operations if needed + r.Logger.Info("Found existing active vault with displayName", "DisplayName", displayName, "VaultId", *existingVaultId) + dbcsInst.Status.KMSDetailsStatus.VaultId = *existingVaultId + dbcsInst.Status.KMSDetailsStatus.ManagementEndpoint = *existingVaultManagementEndpoint + } + if existingVaultId != nil { + + // Find the key ID based on compartmentID in the existing vault + + listKeysReq := keymanagement.ListKeysRequest{ + CompartmentId: &dbcsInst.Spec.KMSConfig.CompartmentId, + } + + var keyId *string + var keyName *string + + // Make a single request to list keys + listKeysResp, err := kmsClient.ListKeys(ctx, listKeysReq) + if err != nil { + r.Logger.Error(err, "Error listing keys in existing vault") + return ctrl.Result{}, err + } + + // Iterate over the keys to find the desired key + for _, key := range listKeysResp.Items { + if key.DisplayName != nil && *key.DisplayName == dbcsInst.Spec.KMSConfig.KeyName { + keyId = key.Id + keyName = key.DisplayName + dbcsInst.Status.KMSDetailsStatus.KeyId = *key.Id + dbcsInst.Status.KMSDetailsStatus.KeyName = *key.DisplayName + break + } + } + + if keyId == nil { + r.Logger.Info("Master key not found in existing vault, creating new key") + + // Create the KMS key in the existing vault + keyResponse, err := r.createKMSKey(ctx, &dbcsInst.Spec.KMSConfig, kmsClient, &dbcsInst.Status.KMSDetailsStatus) + if err != nil { + return ctrl.Result{}, err + } + + // Update the DbSystem with the encryption key ID + dbcsInst.Status.KMSDetailsStatus.KeyId = *keyResponse.Key.Id + dbcsInst.Status.KMSDetailsStatus.KeyName = *keyResponse.Key.DisplayName + } else { + r.Logger.Info("Found existing master key in vault", 
"KeyName", dbcsInst.Spec.KMSConfig.KeyName, "KeyId", *keyId) + + // Update the DbSystem with the existing encryption key ID + dbcsInst.Status.KMSDetailsStatus.KeyId = *keyId + dbcsInst.Status.KMSDetailsStatus.KeyName = *keyName + } + } else { + r.Logger.Info("Creating new vault") + + // Create the new vault + vaultResponse, err := r.createKMSVault(ctx, &dbcsInst.Spec.KMSConfig, kmsClient, &dbcsInst.Status.KMSDetailsStatus) + if err != nil { + return ctrl.Result{}, err + } + dbcsInst.Status.KMSDetailsStatus.VaultId = *vaultResponse.Id + dbcsInst.Status.KMSDetailsStatus.ManagementEndpoint = *vaultResponse.ManagementEndpoint + // Create the KMS key in the newly created vault + keyResponse, err := r.createKMSKey(ctx, &dbcsInst.Spec.KMSConfig, kmsClient, &dbcsInst.Status.KMSDetailsStatus) + if err != nil { + return ctrl.Result{}, err + } + + // Update the DbSystem with the encryption key ID + dbcsInst.Status.KMSDetailsStatus.KeyId = *keyResponse.Key.Id + dbcsInst.Status.KMSDetailsStatus.KeyName = *keyResponse.Key.DisplayName + + } } - assignDBCSID(dbcsInst, dbcsID) + } + //debugging + // lastSuccessfullSpec = nil + // r.ensureDBSystemSpec(&dbcsInst.Spec.DbSystem) + // Check if cloning is needed, debugging + // *dbcsInst.Status.DbCloneStatus.Id = "" + setupCloning := false + // Check if SetupDBCloning is true and ensure one of the required fields is provided + if dbcsInst.Spec.SetupDBCloning { + // If SetupDBCloning is true, at least one of Id, DbBackupId, or DatabaseId must be non-nil + if dbcsInst.Spec.Id == nil && dbcsInst.Spec.DbBackupId == nil && dbcsInst.Spec.DatabaseId == nil { + // If none of the required fields are set, log an error and exit the function + r.Logger.Error(err, "SetupDBCloning is defined but other necessary details (Id, DbBackupId, DatabaseId) are not present. 
Refer README.md file for instructions.") + return ctrl.Result{}, nil + } + // If the condition is met, proceed with cloning setup + setupCloning = true } else { - if lastSucSpec == nil { - if err := dbcsv1.GetDbSystemId(r.Logger, r.dbClient, dbcsInst); err != nil { - // Change the status to Failed - if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev1alpha1.Failed, r.nwClient, r.wrClient); statusErr != nil { + // If SetupDBCloning is false, continue as usual without cloning + setupCloning = false + } + + var dbSystemId string + // Executing DB Cloning Process, if defined. Do not repeat cloning again when Status has Id present. + if setupCloning && dbcsInst.Status.DbCloneStatus.Id == nil { + switch { + + case dbcsInst.Spec.SetupDBCloning && dbcsInst.Spec.DbBackupId != nil: + dbSystemId, err = dbcsv1.CloneFromBackupAndGetDbcsId(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient) + if err != nil { + r.Logger.Error(err, "Fail to clone db system from backup and get DbcsSystem System ID") + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil { return ctrl.Result{}, statusErr } - return ctrl.Result{}, err + + return ctrl.Result{}, nil } - if err := dbcsv1.SetDBCSDatabaseLifecycleState(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient); err != nil { - // Change the status to required state + r.Logger.Info("DB Cloning completed successfully from provided backup DB system") + + case dbcsInst.Spec.SetupDBCloning && dbcsInst.Spec.DatabaseId != nil: + dbSystemId, err = dbcsv1.CloneFromDatabaseAndGetDbcsId(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient) + if err != nil { + r.Logger.Error(err, "Fail to clone db system from DatabaseID provided") + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil { + return 
ctrl.Result{}, statusErr + } + + return ctrl.Result{}, nil + } + r.Logger.Info("DB Cloning completed successfully from provided databaseId") + + case dbcsInst.Spec.SetupDBCloning && dbcsInst.Spec.DbBackupId == nil && dbcsInst.Spec.DatabaseId == nil: + dbSystemId, err = dbcsv1.CloneAndGetDbcsId(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient) + if err != nil { + r.Logger.Error(err, "Fail to clone db system and get DbcsSystem System ID") + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil { + return ctrl.Result{}, statusErr + } + return ctrl.Result{}, nil + } + r.Logger.Info("DB Cloning completed successfully from provided db system") + } + } else if !setupCloning { + if dbcsInst.Spec.Id == nil && lastSuccessfullSpec == nil { + // If no DbcsSystem ID specified, create a new DB System + // ======================== Validate Specs ============== + err = dbcsv1.ValidateSpex(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.Recorder) + if err != nil { return ctrl.Result{}, err } + r.Logger.Info("DbcsSystem DBSystem provisioning") + dbcsID, err := dbcsv1.CreateAndGetDbcsId(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient, &dbcsInst.Status.KMSDetailsStatus) + if err != nil { + r.Logger.Error(err, "Fail to provision and get DbcsSystem System ID") + + // Change the status to Failed + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil { + return ctrl.Result{}, statusErr + } + // The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue + return ctrl.Result{}, nil + } - dbcsInstId := *dbcsInst.Spec.Id + assignDBCSID(dbcsInst, dbcsID) + // Check if KMSConfig is specified + kmsConfig := dbcsInst.Spec.KMSConfig + if kmsConfig != (databasev4.KMSConfig{}) { + // Check if KMSDetailsStatus is uninitialized 
(zero value) + if dbcsInst.Spec.DbSystem.KMSConfig != dbcsInst.Spec.KMSConfig { + dbcsInst.Spec.DbSystem.KMSConfig = dbcsInst.Spec.KMSConfig + } + } if err := dbcsv1.UpdateDbcsSystemId(r.KubeClient, dbcsInst); err != nil { // Change the status to Failed - assignDBCSID(dbcsInst, dbcsInstId) - if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev1alpha1.Failed, r.nwClient, r.wrClient); statusErr != nil { + assignDBCSID(dbcsInst, dbcsID) + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil { return ctrl.Result{}, statusErr } return ctrl.Result{}, err } - r.Logger.Info("Sync information from remote DbcsSystem System successfully") - - dbcsInstId = *dbcsInst.Spec.Id + r.Logger.Info("DbcsSystem system provisioned succesfully") + assignDBCSID(dbcsInst, dbcsID) if err := dbcsInst.UpdateLastSuccessfulSpec(r.KubeClient); err != nil { return ctrl.Result{}, err } - assignDBCSID(dbcsInst, dbcsInstId) + assignDBCSID(dbcsInst, dbcsID) } else { - if dbcsInst.Spec.Id == nil { - dbcsInst.Spec.Id = lastSucSpec.Id - } + if lastSuccessfullSpec == nil { // first time after creation of DB + if err := dbcsv1.GetDbSystemId(r.Logger, r.dbClient, dbcsInst); err != nil { + // Change the status to Failed + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil { + return ctrl.Result{}, statusErr + } + return ctrl.Result{}, err + } + if err := dbcsv1.SetDBCSDatabaseLifecycleState(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient); err != nil { + // Change the status to required state + return ctrl.Result{}, err + } - if err := dbcsv1.UpdateDbcsSystemIdInst(r.Logger, r.dbClient, dbcsInst, r.KubeClient, r.nwClient, r.wrClient); err != nil { - r.Logger.Error(err, "Fail to update DbcsSystem Id") + dbSystemId := *dbcsInst.Spec.Id + if err := dbcsv1.UpdateDbcsSystemId(r.KubeClient, 
dbcsInst); err != nil { + // Change the status to Failed + assignDBCSID(dbcsInst, dbSystemId) + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil { + return ctrl.Result{}, statusErr + } + return ctrl.Result{}, err + } - // Change the status to Failed - if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev1alpha1.Failed, r.nwClient, r.wrClient); statusErr != nil { - return ctrl.Result{}, statusErr + r.Logger.Info("Sync information from remote DbcsSystem System successfully") + + dbSystemId = *dbcsInst.Spec.Id + if err := dbcsInst.UpdateLastSuccessfulSpec(r.KubeClient); err != nil { + return ctrl.Result{}, err + } + assignDBCSID(dbcsInst, dbSystemId) + } else { + dbSystemId := "" + if dbcsInst.Spec.Id == nil { + dbcsInst.Spec.Id = lastSuccessfullSpec.Id + dbSystemId = *dbcsInst.Spec.Id + } else { + dbSystemId = *dbcsInst.Spec.Id + } + //debugging + // *dbcsInst.Spec.Id = "ocid1.dbsystem.oc1.iad.anuwcljsabf7htya55wz5vfil7ul3pkzpubnymp6zrp3fhgomv3fcdr2vtiq" + compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, *dbcsInst.Spec.Id) + if err != nil { + fmt.Printf("Failed to get compartment ID: %v\n", err) + return ctrl.Result{}, err + } + dbHomeId, err := r.getDbHomeIdByDbSystemID(ctx, compartmentId, *dbcsInst.Spec.Id) + if err != nil { + fmt.Printf("Failed to get DB Home ID: %v\n", err) + return ctrl.Result{}, err + } + + databaseIds, err := r.getDatabaseIDByDbSystemID(ctx, *dbcsInst.Spec.Id, compartmentId, dbHomeId) + if err != nil { + fmt.Printf("Failed to get database IDs: %v\n", err) + return ctrl.Result{}, err + } + err = r.getPluggableDatabaseDetails(ctx, dbcsInst, *dbcsInst.Spec.Id, databaseIds) + if err != nil { + fmt.Printf("Failed to get pluggable database details: %v\n", err) + return ctrl.Result{}, err + } + + if err := dbcsv1.UpdateDbcsSystemIdInst(r.Logger, r.dbClient, dbcsInst, r.KubeClient, r.nwClient, r.wrClient, databaseIds[0]); 
err != nil { + r.Logger.Error(err, "Fail to update DbcsSystem Id") + + // Change the status to Failed + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil { + return ctrl.Result{}, statusErr + } + // The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue + return ctrl.Result{}, nil + } + if err := dbcsv1.SetDBCSDatabaseLifecycleState(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient); err != nil { + // Change the status to required state + return ctrl.Result{}, err + } + // Update Spec and Status + result, err := r.updateSpecsAndStatus(ctx, dbcsInst, dbSystemId) + if err != nil { + return result, err } - // The reconciler should not requeue since the error returned from OCI during update will not be solved by requeue - return ctrl.Result{}, nil - } - if err := dbcsv1.SetDBCSDatabaseLifecycleState(r.Logger, r.KubeClient, r.dbClient, dbcsInst, r.nwClient, r.wrClient); err != nil { - // Change the status to required state - return ctrl.Result{}, err } } } @@ -261,21 +539,914 @@ func (r *DbcsSystemReconciler) Reconcile(ctx context.Context, req ctrl.Request) //r.updateWalletSecret(dbcs) // Update the last succesful spec - dbcsInstId := *dbcsInst.Spec.Id - if err := dbcsInst.UpdateLastSuccessfulSpec(r.KubeClient); err != nil { - return ctrl.Result{}, err + if dbcsInst.Spec.Id != nil { + dbSystemId = *dbcsInst.Spec.Id + + if err := dbcsInst.UpdateLastSuccessfulSpec(r.KubeClient); err != nil { + return ctrl.Result{}, err + } + } else if dbcsInst.Status.DbCloneStatus.Id != nil { + dbSystemId = *dbcsInst.Status.DbCloneStatus.Id } //assignDBCSID(dbcsInst,dbcsI) // Change the phase to "Available" - assignDBCSID(dbcsInst, dbcsInstId) - if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev1alpha1.Available, r.nwClient, r.wrClient); statusErr != nil { + assignDBCSID(dbcsInst, dbSystemId) 
+ if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcsInst, databasev4.Available, r.nwClient, r.wrClient); statusErr != nil { return ctrl.Result{}, statusErr } - return ctrl.Result{}, nil + r.Logger.Info("DBInst after assignment", "dbcsInst:->", dbcsInst) + + // Check if specified PDB exists or needs to be created + exists, err := r.validatePDBExistence(dbcsInst) + if err != nil { + fmt.Printf("Failed to get PDB Details: %v\n", err) + return ctrl.Result{}, err + } + if dbcsInst.Spec.PdbConfigs != nil { + if !exists { + for _, pdbConfig := range dbcsInst.Spec.PdbConfigs { + if pdbConfig.PdbName != nil { + // Get database details + // Get DB Home ID by DB System ID + // Get Compartment ID by DB System ID + compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, dbSystemId) + if err != nil { + fmt.Printf("Failed to get compartment ID: %v\n", err) + return ctrl.Result{}, err + } + dbHomeId, err := r.getDbHomeIdByDbSystemID(ctx, compartmentId, dbSystemId) + if err != nil { + fmt.Printf("Failed to get DB Home ID: %v\n", err) + return ctrl.Result{}, err + } + databaseIds, err := r.getDatabaseIDByDbSystemID(ctx, dbSystemId, compartmentId, dbHomeId) + if err != nil { + fmt.Printf("Failed to get database IDs: %v\n", err) + return ctrl.Result{}, err + } + + // Now you can use dbDetails to access database attributes + r.Logger.Info("Database details fetched successfully", "DatabaseId", databaseIds) + + // Check if deletion is requested + if pdbConfig.IsDelete != nil && *pdbConfig.IsDelete { + // Call deletePluggableDatabase function + if err := r.deletePluggableDatabase(ctx, pdbConfig, dbSystemId); err != nil { + return ctrl.Result{}, err + } + // Continue to the next pdbConfig + continue + } else { + // Call the method to create the pluggable database + r.Logger.Info("Calling createPluggableDatabase", "ctx:->", ctx, "dbcsInst:->", dbcsInst, "databaseIds:->", databaseIds[0], "compartmentId:->", compartmentId) + pdbId, err := r.createPluggableDatabase(ctx, 
dbcsInst, pdbConfig, databaseIds[0], compartmentId, dbSystemId) + if err != nil { + // Handle error if required + return ctrl.Result{}, err + } + + // Create or update the PDBConfigStatus in DbcsSystemStatus + pdbConfigStatus := databasev4.PDBConfigStatus{ + PdbName: pdbConfig.PdbName, + ShouldPdbAdminAccountBeLocked: pdbConfig.ShouldPdbAdminAccountBeLocked, + PdbLifecycleState: databasev4.Available, + FreeformTags: pdbConfig.FreeformTags, + PluggableDatabaseId: &pdbId, + } + + // Create a map to track existing PDBConfigStatus by PdbName + pdbDetailsMap := make(map[string]databasev4.PDBConfigStatus) + + // Populate the map with existing PDBConfigStatus from dbcsInst.Status.PdbDetailsStatus + for _, pdbDetails := range dbcsInst.Status.PdbDetailsStatus { + for _, existingPdbConfig := range pdbDetails.PDBConfigStatus { + pdbDetailsMap[*existingPdbConfig.PdbName] = existingPdbConfig + } + } + + // Update the map with the new or updated PDBConfigStatus + pdbDetailsMap[*pdbConfig.PdbName] = pdbConfigStatus + + // Convert the map back to a slice of PDBDetailsStatus + var updatedPdbDetailsStatus []databasev4.PDBDetailsStatus + for _, pdbConfigStatus := range pdbDetailsMap { + updatedPdbDetailsStatus = append(updatedPdbDetailsStatus, databasev4.PDBDetailsStatus{ + PDBConfigStatus: []databasev4.PDBConfigStatus{pdbConfigStatus}, + }) + } + + // Assign the updated slice to dbcsInst.Status.PdbDetailsStatus + dbcsInst.Status.PdbDetailsStatus = updatedPdbDetailsStatus + // Update the status in Kubernetes + // Update the status subresource + err = r.KubeClient.Status().Update(ctx, dbcsInst) + if err != nil { + r.Logger.Error(err, "Failed to update DB status") + return reconcile.Result{}, err + } + + } + } + } + } else { + r.Logger.Info("No change in PDB configurations or, already existed PDB Status.") + } + } + // } else { + // r.Logger.Info("No PDB configurations given.") + // } + // r.Logger.Info("DBInst after assignment", "dbcsInst:->", dbcsInst) + // // Check if PDBConfig is 
defined and needs to be created or deleted + pdbConfigs := dbcsInst.Spec.PdbConfigs + if pdbConfigs != nil { + for _, pdbConfig := range pdbConfigs { + if pdbConfig.PdbName != nil { + // Get database details + // Get DB Home ID by DB System ID + // Get Compartment ID by DB System ID + compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, dbSystemId) + if err != nil { + fmt.Printf("Failed to get compartment ID: %v\n", err) + return ctrl.Result{}, err + } + dbHomeId, err := r.getDbHomeIdByDbSystemID(ctx, compartmentId, dbSystemId) + if err != nil { + fmt.Printf("Failed to get DB Home ID: %v\n", err) + return ctrl.Result{}, err + } + databaseIds, err := r.getDatabaseIDByDbSystemID(ctx, dbSystemId, compartmentId, dbHomeId) + if err != nil { + fmt.Printf("Failed to get database IDs: %v\n", err) + return ctrl.Result{}, err + } + + // Now you can use dbDetails to access database attributes + r.Logger.Info("Database details fetched successfully", "DatabaseId", databaseIds) + + // Check if deletion is requested + if pdbConfig.IsDelete != nil && *pdbConfig.IsDelete { + // Call deletePluggableDatabase function + if err := r.deletePluggableDatabase(ctx, pdbConfig, dbSystemId); err != nil { + return ctrl.Result{}, err + } + // Continue to the next pdbConfig + continue + } else { + // Call the method to create the pluggable database + r.Logger.Info("Calling createPluggableDatabase", "ctx:->", ctx, "dbcsInst:->", dbcsInst, "databaseIds:->", databaseIds[0], "compartmentId:->", compartmentId) + _, err := r.createPluggableDatabase(ctx, dbcsInst, pdbConfig, databaseIds[0], compartmentId, dbSystemId) + if err != nil { + // Handle error if required + return ctrl.Result{}, err + } + } + } + } + } + + return resultQ, nil + +} +func (r *DbcsSystemReconciler) updateSpecsAndStatus(ctx context.Context, dbcsInst *databasev4.DbcsSystem, dbSystemId string) (reconcile.Result, error) { + + // Retry mechanism for handling resource version conflicts + retryErr := 
retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Fetch the latest version of the resource + latestDbcsInst := &databasev4.DbcsSystem{} + err := r.KubeClient.Get(ctx, types.NamespacedName{ + Name: dbcsInst.Name, + Namespace: dbcsInst.Namespace, + }, latestDbcsInst) + if err != nil { + r.Logger.Error(err, "Failed to fetch the latest DB resource") + return err + } + + // Update the Spec subresource + latestDbcsInst.Spec.Id = &dbSystemId + err = r.KubeClient.Update(ctx, latestDbcsInst) + if err != nil { + r.Logger.Error(err, "Failed to update DB Spec") + return err + } + + // Update the Status subresource + + // Update the Status subresource + originalStatus := reflect.ValueOf(&dbcsInst.Status).Elem() + latestStatus := reflect.ValueOf(&latestDbcsInst.Status).Elem() + + // Iterate over all fields in the Status struct and update them + for i := 0; i < originalStatus.NumField(); i++ { + fieldName := originalStatus.Type().Field(i).Name + latestStatus.FieldByName(fieldName).Set(originalStatus.Field(i)) + } + + err = r.KubeClient.Status().Update(ctx, latestDbcsInst) + if err != nil { + r.Logger.Error(err, "Failed to update DB status") + return err + } + + return nil + }) + + if retryErr != nil { + r.Logger.Error(retryErr, "Failed to update DB Spec and Status after retries") + return reconcile.Result{}, retryErr + } + + r.Logger.Info("Successfully updated Spec and Status") + return reconcile.Result{}, nil +} + +// getDbHomeIdByDbSystemID retrieves the DB Home ID associated with the given DB System ID +func (r *DbcsSystemReconciler) getDbHomeIdByDbSystemID(ctx context.Context, compartmentId, dbSystemId string) (string, error) { + listRequest := database.ListDbHomesRequest{ + CompartmentId: &compartmentId, + DbSystemId: &dbSystemId, + } + + listResponse, err := r.dbClient.ListDbHomes(ctx, listRequest) + if err != nil { + return "", fmt.Errorf("failed to list DB homes: %v", err) + } + + if len(listResponse.Items) == 0 { + return "", fmt.Errorf("no DB homes found for 
DB system ID: %s", dbSystemId) + } + + return *listResponse.Items[0].Id, nil +} +func (r *DbcsSystemReconciler) getCompartmentIDByDbSystemID(ctx context.Context, dbSystemId string) (string, error) { + // Construct the GetDbSystem request + getRequest := database.GetDbSystemRequest{ + DbSystemId: &dbSystemId, + } + + // Call GetDbSystem API using the existing dbClient + getResponse, err := r.dbClient.GetDbSystem(ctx, getRequest) + if err != nil { + return "", fmt.Errorf("failed to get DB system details: %v", err) + } + + // Extract the compartment ID from the DB system details + compartmentId := *getResponse.DbSystem.CompartmentId + + return compartmentId, nil +} +func (r *DbcsSystemReconciler) getDatabaseIDByDbSystemID(ctx context.Context, dbSystemId, compartmentId, dbHomeId string) ([]string, error) { + // Construct the ListDatabases request + request := database.ListDatabasesRequest{ + SystemId: &dbSystemId, + CompartmentId: &compartmentId, + DbHomeId: &dbHomeId, + } + + // Call ListDatabases API using the existing dbClient + response, err := r.dbClient.ListDatabases(ctx, request) + if err != nil { + return nil, fmt.Errorf("failed to list databases: %v", err) + } + + // Extract database IDs from the response + var databaseIds []string + for _, dbSummary := range response.Items { + databaseIds = append(databaseIds, *dbSummary.Id) + } + + return databaseIds, nil +} +func (r *DbcsSystemReconciler) validatePDBExistence(dbcs *databasev4.DbcsSystem) (bool, error) { + r.Logger.Info("Validating PDB existence for all provided PDBs") + + // Iterate over each PDBConfig in Spec.PdbConfigs + for _, pdbConfig := range dbcs.Spec.PdbConfigs { + pdbName := pdbConfig.PdbName + r.Logger.Info("Checking PDB existence in Status", "PDBName", *pdbName) + + found := false + + // Check if the PDB exists in Status.PdbDetailsStatus with a state of "Available" + for _, pdbDetailsStatus := range dbcs.Status.PdbDetailsStatus { + for _, pdbStatus := range pdbDetailsStatus.PDBConfigStatus { + if 
pdbStatus.PdbName != nil && *pdbStatus.PdbName == *pdbName && pdbStatus.PdbLifecycleState == "AVAILABLE" { + found = true + break + } + } + if found { + break + } + } + + if !found { + r.Logger.Info("Pluggable database does not exist or is not available in Status.PdbDetailsStatus", "PDBName", *pdbName) + return false, nil + } + } + + // If all PDBs are found and available + r.Logger.Info("All specified PDBs are available") + return true, nil +} +func (r *DbcsSystemReconciler) createPluggableDatabase(ctx context.Context, dbcs *databasev4.DbcsSystem, pdbConfig databasev4.PDBConfig, databaseId, compartmentId, dbSystemId string) (string, error) { + r.Logger.Info("Checking if the pluggable database exists", "PDBName", pdbConfig.PdbName) + + // Check if the pluggable database already exists + exists, pdbId, err := r.doesPluggableDatabaseExist(ctx, compartmentId, pdbConfig.PdbName, databaseId) + if err != nil { + r.Logger.Error(err, "Failed to check if pluggable database exists", "PDBName", pdbConfig.PdbName) + return "", err + } + if exists { + // Set the PluggableDatabaseId in PDBConfig + pdbConfig.PluggableDatabaseId = pdbId + r.Logger.Info("Pluggable database already exists", "PDBName", pdbConfig.PdbName, "PluggableDatabaseId", *pdbConfig.PluggableDatabaseId) + return *pdbId, nil + } + + // Define the DatabaseExists method locally + databaseExists := func(dbSystemID string) (bool, error) { + req := database.GetDbSystemRequest{ + DbSystemId: &dbSystemID, + } + _, err := r.dbClient.GetDbSystem(ctx, req) + if err != nil { + if ociErr, ok := err.(common.ServiceError); ok && ociErr.GetHTTPStatusCode() == 404 { + return false, nil + } + return false, err + } + return true, nil + } + + exists, err = databaseExists(dbSystemId) + if err != nil { + r.Logger.Error(err, "Failed to check database existence") + return "", err + } + + if !exists { + errMsg := fmt.Sprintf("Database does not exist: %s", dbSystemId) + r.Logger.Error(fmt.Errorf(errMsg), "Database not found") + return 
"", fmt.Errorf(errMsg) + } + + // Fetch secrets for TdeWalletPassword and PdbAdminPassword + tdeWalletPassword, err := r.getSecret(ctx, dbcs.Namespace, *pdbConfig.TdeWalletPassword) + // Trim newline character from the password + tdeWalletPassword = strings.TrimSpace(tdeWalletPassword) + r.Logger.Info("TDE wallet password retrieved successfully") + if err != nil { + r.Logger.Error(err, "Failed to get TDE wallet password secret") + return "", err + } + + pdbAdminPassword, err := r.getSecret(ctx, dbcs.Namespace, *pdbConfig.PdbAdminPassword) + // Trim newline character from the password + pdbAdminPassword = strings.TrimSpace(pdbAdminPassword) + r.Logger.Info("PDB admin password retrieved successfully") + if err != nil { + r.Logger.Error(err, "Failed to get PDB admin password secret") + return "", err + } + // Change the status to Provisioning + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcs, databasev4.Provision, r.nwClient, r.wrClient); statusErr != nil { + r.Logger.Error(err, "Failed to set DBCS LifeCycle State to Provisioning") + return "", statusErr + } + r.Logger.Info("Updated DBCS LifeCycle State to Provisioning") + // Proceed with creating the pluggable database + r.Logger.Info("Creating pluggable database", "PDBName", pdbConfig.PdbName) + createPdbReq := database.CreatePluggableDatabaseRequest{ + CreatePluggableDatabaseDetails: database.CreatePluggableDatabaseDetails{ + PdbName: pdbConfig.PdbName, + ContainerDatabaseId: &databaseId, + ShouldPdbAdminAccountBeLocked: pdbConfig.ShouldPdbAdminAccountBeLocked, + PdbAdminPassword: common.String(pdbAdminPassword), + TdeWalletPassword: common.String(tdeWalletPassword), + FreeformTags: pdbConfig.FreeformTags, + }, + } + response, err := r.dbClient.CreatePluggableDatabase(ctx, createPdbReq) + if err != nil { + r.Logger.Error(err, "Failed to create pluggable database", "PDBName", pdbConfig.PdbName) + return "", err + } + // Set the PluggableDatabaseId in PDBConfig + pdbConfig.PluggableDatabaseId 
= response.PluggableDatabase.Id + + r.Logger.Info("Pluggable database creation initiated", "PDBName", pdbConfig.PdbName, "PDBID", *pdbConfig.PluggableDatabaseId) + + // Polling mechanism to check PDB status + const maxRetries = 120 // total 1 hour wait for creation of PDB + const retryInterval = 30 // in seconds + + for i := 0; i < maxRetries; i++ { + getPdbReq := database.GetPluggableDatabaseRequest{ + PluggableDatabaseId: pdbConfig.PluggableDatabaseId, + } + + getPdbResp, err := r.dbClient.GetPluggableDatabase(ctx, getPdbReq) + if err != nil { + r.Logger.Error(err, "Failed to get pluggable database status", "PDBID", *pdbConfig.PluggableDatabaseId) + return "", err + } + + pdbStatus := getPdbResp.PluggableDatabase.LifecycleState + r.Logger.Info("Checking pluggable database status", "PDBID", *pdbConfig.PluggableDatabaseId, "Status", pdbStatus) + + if pdbStatus == database.PluggableDatabaseLifecycleStateAvailable { + r.Logger.Info("Pluggable database successfully created", "PDBName", pdbConfig.PdbName, "PDBID", *pdbConfig.PluggableDatabaseId) + // Change the status to Available + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcs, databasev4.Available, r.nwClient, r.wrClient); statusErr != nil { + return "", statusErr + } + return *response.PluggableDatabase.Id, nil + } + + if pdbStatus == database.PluggableDatabaseLifecycleStateFailed { + r.Logger.Error(fmt.Errorf("pluggable database creation failed"), "PDBName", pdbConfig.PdbName, "PDBID", *pdbConfig.PluggableDatabaseId) + // Change the status to Failed + if statusErr := dbcsv1.SetLifecycleState(r.KubeClient, r.dbClient, dbcs, databasev4.Failed, r.nwClient, r.wrClient); statusErr != nil { + return "", statusErr + } + return "", fmt.Errorf("pluggable database creation failed") + } + + time.Sleep(retryInterval * time.Second) + } + + r.Logger.Error(fmt.Errorf("timed out waiting for pluggable database to become available"), "PDBName", pdbConfig.PdbName, "PDBID", *pdbConfig.PluggableDatabaseId) + 
return "", fmt.Errorf("timed out waiting for pluggable database to become available") +} + +func (r *DbcsSystemReconciler) pluggableDatabaseExists(ctx context.Context, pluggableDatabaseId string) (bool, error) { + req := database.GetPluggableDatabaseRequest{ + PluggableDatabaseId: &pluggableDatabaseId, + } + _, err := r.dbClient.GetPluggableDatabase(ctx, req) + if err != nil { + if ociErr, ok := err.(common.ServiceError); ok && ociErr.GetHTTPStatusCode() == 404 { + // PDB does not exist + return false, nil + } + // Other error occurred + return false, err + } + // PDB exists + return true, nil +} + +func (r *DbcsSystemReconciler) deletePluggableDatabase(ctx context.Context, pdbConfig databasev4.PDBConfig, dbSystemId string) error { + if pdbConfig.PdbName == nil { + return fmt.Errorf("PDB name is not specified") + } + + r.Logger.Info("Deleting pluggable database", "PDBName", *pdbConfig.PdbName) + + if pdbConfig.PluggableDatabaseId == nil { + r.Logger.Info("PluggableDatabaseId is not specified, getting pluggable databaseID") + // Call a function to retrieve PluggableDatabaseId + pdbID, err := r.getPluggableDatabaseID(ctx, pdbConfig, dbSystemId) + if err != nil { + return fmt.Errorf("failed to get PluggableDatabaseId: %v", err) + } + pdbConfig.PluggableDatabaseId = &pdbID + } + + // Now pdbConfig.PluggableDatabaseId should not be nil + if pdbConfig.PluggableDatabaseId == nil { + return fmt.Errorf("PluggableDatabaseId is still nil after retrieval attempt. 
Nothing to delete") + } + + // Check if PluggableDatabaseId exists in the live system + exists, err := r.pluggableDatabaseExists(ctx, *pdbConfig.PluggableDatabaseId) + if err != nil { + r.Logger.Error(err, "Failed to check if pluggable database exists", "PluggableDatabaseId", *pdbConfig.PluggableDatabaseId) + return err + } + if !exists { + r.Logger.Info("PluggableDatabaseId does not exist in the live system, nothing to delete", "PluggableDatabaseId", *pdbConfig.PluggableDatabaseId) + return nil + } + + // Define the delete request + deleteReq := database.DeletePluggableDatabaseRequest{ + PluggableDatabaseId: pdbConfig.PluggableDatabaseId, + } + + // Call OCI SDK to delete the PDB + _, err = r.dbClient.DeletePluggableDatabase(ctx, deleteReq) + if err != nil { + r.Logger.Error(err, "Failed to delete pluggable database", "PDBName", *pdbConfig.PdbName) + return err + } + + r.Logger.Info("Successfully deleted pluggable database", "PDBName", *pdbConfig.PdbName) + return nil +} + +func (r *DbcsSystemReconciler) getPluggableDatabaseID(ctx context.Context, pdbConfig databasev4.PDBConfig, dbSystemId string) (string, error) { + compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, dbSystemId) + if err != nil { + fmt.Printf("Failed to get compartment ID: %v\n", err) + return "", err + } + request := database.ListPluggableDatabasesRequest{ + CompartmentId: &compartmentId, + } + + response, err := r.dbClient.ListPluggableDatabases(ctx, request) + if err != nil { + return "", fmt.Errorf("failed to list Pluggable Databases: %v", err) + } + + var pdbID string + + for _, pdb := range response.Items { + if *pdb.PdbName == *pdbConfig.PdbName { + pdbID = *pdb.Id + break + } + } + + if pdbID == "" { + return "", fmt.Errorf("pluggable database '%s' not found", *pdbConfig.PdbName) + } + return pdbID, nil +} + +func (r *DbcsSystemReconciler) getPluggableDatabaseDetails(ctx context.Context, dbcsInst *databasev4.DbcsSystem, dbSystemId string, databaseIds []string) error { + 
compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, dbSystemId) + if err != nil { + fmt.Printf("Failed to get compartment ID: %v\n", err) + return err + } + request := database.ListPluggableDatabasesRequest{ + CompartmentId: &compartmentId, + } + + response, err := r.dbClient.ListPluggableDatabases(ctx, request) + if err != nil { + return fmt.Errorf("failed to list Pluggable Databases: %v", err) + } + + // Create a map to track existing PDBDetailsStatus by PdbName + pdbDetailsMap := make(map[string]databasev4.PDBConfigStatus) + + // Populate the map with existing PDBDetailsStatus from dbcsInst.Status.PdbDetailsStatus + // for _, existingPdbDetails := range dbcsInst.Status.PdbDetailsStatus { + // for _, existingPdbConfig := range existingPdbDetails.PDBConfigStatus { + // pdbDetailsMap[*existingPdbConfig.PdbName] = existingPdbConfig + // } + // } + // Convert databaseIds array to a set for quick lookup + databaseIdsSet := make(map[string]struct{}) + for _, id := range databaseIds { + databaseIdsSet[id] = struct{}{} + } + // Update the map with new PDB details from the response + for _, pdb := range response.Items { + if pdb.ContainerDatabaseId != nil { + // Check if the ContainerDatabaseId is in the set of databaseIds + if _, exists := databaseIdsSet[*pdb.ContainerDatabaseId]; exists { + pdbConfigStatus := databasev4.PDBConfigStatus{ + PdbName: pdb.PdbName, + ShouldPdbAdminAccountBeLocked: pdb.IsRestricted, + FreeformTags: pdb.FreeformTags, + PluggableDatabaseId: pdb.Id, + PdbLifecycleState: convertLifecycleState(pdb.LifecycleState), + } + + // Update the map with the new or updated PDBConfigStatus + pdbDetailsMap[*pdb.PdbName] = pdbConfigStatus + } + } + } + + // Convert the map back to a slice of PDBDetailsStatus + var updatedPdbDetailsStatus []databasev4.PDBDetailsStatus + for _, pdbConfigStatus := range pdbDetailsMap { + updatedPdbDetailsStatus = append(updatedPdbDetailsStatus, databasev4.PDBDetailsStatus{ + PDBConfigStatus: 
[]databasev4.PDBConfigStatus{pdbConfigStatus}, + }) + } + + // Assign the updated slice to dbcsInst.Status.PdbDetailsStatus + dbcsInst.Status.PdbDetailsStatus = updatedPdbDetailsStatus + + return nil +} + +func convertLifecycleState(state database.PluggableDatabaseSummaryLifecycleStateEnum) databasev4.LifecycleState { + switch state { + case database.PluggableDatabaseSummaryLifecycleStateProvisioning: + return databasev4.Provision + case database.PluggableDatabaseSummaryLifecycleStateAvailable: + return databasev4.Available + case database.PluggableDatabaseSummaryLifecycleStateTerminating: + return databasev4.Terminate + case database.PluggableDatabaseSummaryLifecycleStateTerminated: + return databasev4.LifecycleState(databasev4.Terminated) + case database.PluggableDatabaseSummaryLifecycleStateUpdating: + return databasev4.Update + case database.PluggableDatabaseSummaryLifecycleStateFailed: + return databasev4.Failed + default: + return databasev4.Failed + } +} + +// doesPluggableDatabaseExist checks if a pluggable database with the given name exists +func (r *DbcsSystemReconciler) doesPluggableDatabaseExist(ctx context.Context, compartmentId string, pdbName *string, databaseId string) (bool, *string, error) { + if pdbName == nil { + return false, nil, fmt.Errorf("pdbName is nil") + } + + listPdbsReq := database.ListPluggableDatabasesRequest{ + CompartmentId: &compartmentId, + } + + resp, err := r.dbClient.ListPluggableDatabases(ctx, listPdbsReq) + if err != nil { + return false, nil, err + } + + for _, pdb := range resp.Items { + if pdb.ContainerDatabaseId != nil { + if pdb.PdbName != nil && *pdb.PdbName == *pdbName && pdb.LifecycleState != "TERMINATED" && *pdb.ContainerDatabaseId == databaseId { + return true, pdb.Id, nil + } + } + } + + return false, nil, nil +} + +// Function to create KMS vault +func (r *DbcsSystemReconciler) createKMSVault(ctx context.Context, kmsConfig *databasev4.KMSConfig, kmsClient keymanagement.KmsManagementClient, kmsInst 
*databasev4.KMSDetailsStatus) (*keymanagement.CreateVaultResponse, error) { + // Dereference the ConfigurationProvider pointer + configProvider := *kmsClient.ConfigurationProvider() + + kmsVaultClient, err := keymanagement.NewKmsVaultClientWithConfigurationProvider(configProvider) + if err != nil { + r.Logger.Error(err, "Error creating KMS vault client") + return nil, err + } + var vaultType keymanagement.CreateVaultDetailsVaultTypeEnum + + if kmsConfig.VaultType != "" { + switch kmsConfig.VaultType { + case "VIRTUAL_PRIVATE": + vaultType = keymanagement.CreateVaultDetailsVaultTypeVirtualPrivate + case "EXTERNAL": + vaultType = keymanagement.CreateVaultDetailsVaultTypeExternal + case "DEFAULT": + vaultType = keymanagement.CreateVaultDetailsVaultTypeDefault + default: + err := fmt.Errorf("unsupported VaultType specified: %s", kmsConfig.VaultType) + r.Logger.Error(err, "unsupported VaultType specified") + return nil, err + } + } else { + // Default to DEFAULT if kmsConfig.VaultType is not defined + vaultType = keymanagement.CreateVaultDetailsVaultTypeDefault + } + + createVaultReq := keymanagement.CreateVaultRequest{ + CreateVaultDetails: keymanagement.CreateVaultDetails{ + CompartmentId: common.String(kmsConfig.CompartmentId), + DisplayName: common.String(kmsConfig.VaultName), + VaultType: vaultType, + }, + } + + resp, err := kmsVaultClient.CreateVault(ctx, createVaultReq) + if err != nil { + r.Logger.Error(err, "Error creating KMS vault") + return nil, err + } + // Wait until vault becomes active or timeout + timeout := time.After(5 * time.Minute) // Example timeout: 5 minutes + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + + for { + select { + case <-timeout: + r.Logger.Error(err, "timed out waiting for vault to become active") + case <-ticker.C: + getVaultReq := keymanagement.GetVaultRequest{ + VaultId: resp.Id, + } + + getResp, err := kmsVaultClient.GetVault(ctx, getVaultReq) + if err != nil { + r.Logger.Error(err, "Error getting vault 
status") + return nil, err + } + + if getResp.LifecycleState == keymanagement.VaultLifecycleStateActive { + r.Logger.Info("KMS vault created successfully and active") + // Save the vault details into KMSConfig + kmsInst.VaultId = *getResp.Vault.Id + kmsInst.ManagementEndpoint = *getResp.Vault.ManagementEndpoint + kmsInst.VaultName = *getResp.DisplayName + kmsInst.CompartmentId = *getResp.CompartmentId + kmsInst.VaultType = kmsConfig.VaultType + return &keymanagement.CreateVaultResponse{}, err + } + + r.Logger.Info(fmt.Sprintf("Vault state: %s, waiting for active state...", string(getResp.LifecycleState))) + } + } +} + +// Function to create KMS key +func (r *DbcsSystemReconciler) createKMSKey(ctx context.Context, kmsConfig *databasev4.KMSConfig, kmsClient keymanagement.KmsManagementClient, kmsInst *databasev4.KMSDetailsStatus) (*keymanagement.CreateKeyResponse, error) { + // Determine the KeyShape based on the encryption algorithm + var algorithm keymanagement.KeyShapeAlgorithmEnum + var keyLength int + switch kmsConfig.EncryptionAlgo { + case "AES": + algorithm = keymanagement.KeyShapeAlgorithmAes + keyLength = 32 + case "RSA": + algorithm = keymanagement.KeyShapeAlgorithmRsa + keyLength = 512 + default: + // Default to AES if the provided algorithm is unsupported + algorithm = keymanagement.KeyShapeAlgorithmAes + keyLength = 32 + r.Logger.Info("Unsupported encryption algorithm. 
Defaulting to AES.") + } + + // Create the key shape with the algorithm + keyShape := keymanagement.KeyShape{ + Algorithm: algorithm, + Length: common.Int(keyLength), + } + + createKeyReq := keymanagement.CreateKeyRequest{ + CreateKeyDetails: keymanagement.CreateKeyDetails{ + CompartmentId: common.String(kmsConfig.CompartmentId), + DisplayName: common.String(kmsConfig.KeyName), + KeyShape: &keyShape, + }, + RequestMetadata: common.RequestMetadata{}, + } + + // Call CreateKey without vaultID + resp, err := kmsClient.CreateKey(ctx, createKeyReq) + if err != nil { + r.Logger.Error(err, "Error creating KMS key:") + return nil, err + } + + r.Logger.Info("KMS key created successfully:", resp) + kmsInst.KeyId = *resp.Key.Id + kmsInst.EncryptionAlgo = string(algorithm) + return &resp, nil +} + +func (r *DbcsSystemReconciler) getSecret(ctx context.Context, namespace, secretName string) (string, error) { + secret := &corev1.Secret{} + err := r.KubeClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: secretName}, secret) + if err != nil { + return "", err + } + + // Assume the secret contains only one key-value pair + for _, value := range secret.Data { + return string(value), nil + } + + return "", fmt.Errorf("secret %s is empty", secretName) +} + +// func (r *DbcsSystemReconciler) cloneDbSystem(ctx context.Context, dbcsInst *databasev4.DbcsSystem, provider common.ConfigurationProvider) error { + +// // Initialize OCI clients +// dbClient, err := database.NewDatabaseClientWithConfigurationProvider(provider) +// if err != nil { +// return fmt.Errorf("failed to create OCI database client: %v", err) +// } + +// // Get DB System details +// compartmentId, err := r.getCompartmentIDByDbSystemID(ctx, *dbcsInst.Status.Id) +// if err != nil { +// fmt.Printf("Failed to get compartment ID: %v\n", err) +// return err +// } + +// dbHomeId, err := r.getDbHomeIdByDbSystemID(ctx, compartmentId, *dbcsInst.Status.Id) +// if err != nil { +// fmt.Printf("Failed to get DB Home ID: 
%v\n", err) +// return err +// } + +// databaseIds, err := r.getDatabaseIDByDbSystemID(ctx, *dbcsInst.Status.Id, compartmentId, dbHomeId) +// if err != nil { +// fmt.Printf("Failed to get database IDs: %v\n", err) +// return err +// } + +// // Use the first database ID for cloning +// if len(databaseIds) == 0 { +// return fmt.Errorf("no databases found in the DB system") +// } + +// // Retrieve details of the database to clone +// sourceDatabaseId := databaseIds[0] +// _, err = dbClient.GetDatabase(ctx, database.GetDatabaseRequest{ +// DatabaseId: common.String(sourceDatabaseId), +// }) +// if err != nil { +// return fmt.Errorf("failed to get source database details: %v", err) +// } + +// // adminPassword, err := dbcsv1.GetAdminPassword(kubeClient, dbcsInstance) +// // if err != nil { +// // log.Fatalf("Error getting admin password: %v", err) +// // } + +// // tdePassword, err := GetTdePassword(kubeClient, dbcsInstance) +// // if err != nil { +// // log.Fatalf("Error getting TDE password: %v", err) +// // } + +// // Define the details for creating the database from the existing DB system +// // createDatabaseDetails := CreateDatabaseBaseWrapper{ +// // CreateDatabaseFromDbSystemDetails: database.CreateDatabaseFromDbSystemDetails{ +// // AdminPassword: common.String(adminPassword), // Replace with actual admin password +// // DbName: common.String(dbcsInst.Spec.DbSystem.DbName), // Use the dbName from DbcsSystemSpec +// // DbDomain: common.String(dbcsInst.Spec.DbSystem.DbDomain), // Use the dbDomain from DbcsSystemSpec +// // DbUniqueName: common.String(dbcsInst.Spec.DbSystem.DbUniqueName), // Use the dbUniqueName from DbcsSystemSpec +// // DbBackupConfig: &database.DbBackupConfig{ +// // AutoBackupEnabled: dbcsInst.Spec.DbSystem.DbBackupConfig.AutoBackupEnabled, +// // RecoveryWindowInDays: dbcsInst.Spec.DbSystem.DbBackupConfig.RecoveryWindowsInDays, +// // }, +// // FreeformTags: dbcsInst.Spec.DbSystem.Tags, +// // DefinedTags: map[string]map[string]interface{}{ 
+// // "Namespace": { +// // "TagKey": "TagValue", // Replace with actual defined tags if needed +// // }, +// // }, +// // }, +// // } +// // createDatabaseRequest := database.CreateDatabaseRequest{ +// // CreateNewDatabaseDetails: &createDatabaseDetails, +// // } + +// // createDatabaseResponse, err := dbClient.CreateDatabase(ctx, createDatabaseRequest) +// // if err != nil { +// // return fmt.Errorf("failed to create database from DB system: %v", err) +// // } + +// // // Update instance status with the new database ID +// // dbcsInst.Status.DbInfo = append(dbcsInst.Status.DbInfo, databasev4.DbStatus{ +// // Id: createDatabaseResponse.Database.Id, +// // DbName: dbcsInst.Spec.DbSystem.DbName, +// // DbUniqueName: dbcsInst.Spec.DbSystem.DbUniqueName, +// // }) + +// // err = r.KubeClient.Status().Update(ctx, dbcsInst) +// // if err != nil { +// // return fmt.Errorf("failed to update instance status with database ID: %v", err) +// // } + +// return nil +// } + +// Convert DbBackupConfigAutoBackupWindowEnum to *string +func autoBackupWindowEnumToStringPtr(enum *database.DbBackupConfigAutoBackupWindowEnum) *string { + if enum == nil { + return nil + } + value := string(*enum) + return &value +} +func (r *DbcsSystemReconciler) stringToDbBackupConfigAutoBackupWindowEnum(value *string) (database.DbBackupConfigAutoBackupWindowEnum, error) { + // Define a default value + // Define a default value + const defaultAutoBackupWindow = database.DbBackupConfigAutoBackupWindowOne + + if value == nil { + return defaultAutoBackupWindow, nil // Return the default value + } + + // Convert to enum + enum, ok := database.GetMappingDbBackupConfigAutoBackupWindowEnum(*value) + if !ok { + return "", fmt.Errorf("invalid value for AutoBackupWindow: %s", *value) + } + return enum, nil } -func assignDBCSID(dbcsInst *databasev1alpha1.DbcsSystem, dbcsID string) { +func assignDBCSID(dbcsInst *databasev4.DbcsSystem, dbcsID string) { dbcsInst.Spec.Id = &dbcsID } @@ -286,8 +1457,8 @@ func (r 
*DbcsSystemReconciler) eventFilterPredicate() predicate.Predicate { }, UpdateFunc: func(e event.UpdateEvent) bool { // Get the dbName as old dbName when an update event happens - oldObject := e.ObjectOld.DeepCopyObject().(*databasev1alpha1.DbcsSystem) - newObject := e.ObjectNew.DeepCopyObject().(*databasev1alpha1.DbcsSystem) + oldObject := e.ObjectOld.DeepCopyObject().(*databasev4.DbcsSystem) + newObject := e.ObjectNew.DeepCopyObject().(*databasev4.DbcsSystem) specObject := !reflect.DeepEqual(oldObject.Spec, newObject.Spec) deletionTimeStamp := !reflect.DeepEqual(oldObject.GetDeletionTimestamp(), newObject.GetDeletionTimestamp()) @@ -307,7 +1478,7 @@ func (r *DbcsSystemReconciler) eventFilterPredicate() predicate.Predicate { // SetupWithManager sets up the controller with the Manager. func (r *DbcsSystemReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&databasev1alpha1.DbcsSystem{}). + For(&databasev4.DbcsSystem{}). WithEventFilter(r.eventFilterPredicate()). WithOptions(controller.Options{MaxConcurrentReconciles: 50}). Complete(r) diff --git a/controllers/database/lrest_controller.go b/controllers/database/lrest_controller.go new file mode 100644 index 00000000..91c883e1 --- /dev/null +++ b/controllers/database/lrest_controller.go @@ -0,0 +1,1105 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + //"fmt" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + //lrcommons "github.com/oracle/oracle-database-operator/commons/multitenant/lrest" +) + +// LRESTReconciler reconciles a LREST object +type LRESTReconciler struct { + client.Client + Scheme *runtime.Scheme + Config *rest.Config + Log logr.Logger + Interval time.Duration + Recorder record.EventRecorder +} + +var ( + lrestPhaseInit = "Initializing" + lrestPhasePod = "CreatingPod" + lrestPhaseValPod = "ValidatingPods" + lrestPhaseService = "CreatingService" + lrestPhaseSecrets = "DeletingSecrets" + lrestPhaseReady = "Ready" + lrestPhaseDelete = "Deleting" + lrestPhaseFail = "Failed" +) + +const LRESTFinalizer = "database.oracle.com/LRESTfinalizer" + +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrests,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrests/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrests/finalizers,verbs=update 
+//+kubebuilder:rbac:groups="",resources=pods;pods/log;pods/exec;services;configmaps;events;replicasets,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups=core,resources=pods;secrets;services;configmaps;namespaces,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the LREST object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile +func (r *LRESTReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := r.Log.WithValues("multitenantoperator", req.NamespacedName) + log.Info("Reconcile requested") + + reconcilePeriod := r.Interval * time.Second + requeueY := ctrl.Result{Requeue: true, RequeueAfter: reconcilePeriod} + requeueN := ctrl.Result{} + + var err error + lrest := &dbapi.LREST{} + + // Execute for every reconcile + defer func() { + log.Info("DEFER", "Name", lrest.Name, "Phase", lrest.Status.Phase, "Status", strconv.FormatBool(lrest.Status.Status)) + if !lrest.Status.Status { + if err := r.Status().Update(ctx, lrest); err != nil { + log.Error(err, "Failed to update status for :"+lrest.Name, "err", err.Error()) + } + } + }() + + err = r.Client.Get(context.TODO(), req.NamespacedName, lrest) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("LREST Resource Not found", "Name", lrest.Name) + // Request object not found, could have been deleted after reconcile req. + // Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers. + // Return and don't requeue + lrest.Status.Status = true + return requeueN, nil + } + // Error reading the object - requeue the req. + return requeueY, err + } + + log.Info("Res Status:", "Name", lrest.Name, "Phase", lrest.Status.Phase, "Status", strconv.FormatBool(lrest.Status.Status)) + + // Finalizer section + err = r.manageLRESTDeletion(ctx, req, lrest) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + + // If post-creation, LREST spec is changed, check and take appropriate action + if (lrest.Status.Phase == lrestPhaseReady) && lrest.Status.Status { + r.evaluateSpecChange(ctx, req, lrest) + } + + if !lrest.Status.Status { + phase := lrest.Status.Phase + log.Info("Current Phase:"+phase, "Name", lrest.Name) + + switch phase { + case lrestPhaseInit: + err = r.verifySecrets(ctx, req, lrest) + if err != nil { + lrest.Status.Phase = lrestPhaseFail + return requeueN, nil + } + lrest.Status.Phase = lrestPhasePod + case lrestPhasePod: + // Create LREST PODs + err = r.createLRESTInstances(ctx, req, lrest) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + lrest.Status.Phase = lrestPhaseValPod + case lrestPhaseValPod: + // Validate LREST PODs + err = r.validateLRESTPods(ctx, req, lrest) + if err != nil { + if lrest.Status.Phase == lrestPhaseFail { + return requeueN, nil + } + log.Info("Reconcile queued") + return requeueY, nil + } + lrest.Status.Phase = lrestPhaseService + case lrestPhaseService: + // Create LREST Service + err = r.createLRESTSVC(ctx, req, lrest) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + //lrest.Status.Phase = lrestPhaseSecrets + lrest.Status.Phase = lrestPhaseReady + case lrestPhaseSecrets: + // Delete LREST Secrets + //r.deleteSecrets(ctx, req, lrest) + lrest.Status.Phase = lrestPhaseReady + lrest.Status.Msg = "Success" + case lrestPhaseReady: + lrest.Status.Status = true + r.Status().Update(ctx, lrest) + return 
requeueN, nil + default: + lrest.Status.Phase = lrestPhaseInit + log.Info("DEFAULT:", "Name", lrest.Name, "Phase", phase, "Status", strconv.FormatBool(lrest.Status.Status)) + } + + if err := r.Status().Update(ctx, lrest); err != nil { + log.Error(err, "Failed to update status for :"+lrest.Name, "err", err.Error()) + } + return requeueY, nil + } + + log.Info("Reconcile completed") + return requeueN, nil +} + +/* +********************************************************* + - Create a ReplicaSet for pods based on the LREST container + /******************************************************* +*/ +func (r *LRESTReconciler) createLRESTInstances(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("createLRESTInstances", req.NamespacedName) + + replicaSet := r.createReplicaSetSpec(lrest) + + foundRS := &appsv1.ReplicaSet{} + err := r.Get(context.TODO(), types.NamespacedName{Name: replicaSet.Name, Namespace: lrest.Namespace}, foundRS) + if err != nil && apierrors.IsNotFound(err) { + log.Info("Creating LREST Replicaset: " + replicaSet.Name) + err = r.Create(ctx, replicaSet) + if err != nil { + log.Error(err, "Failed to create ReplicaSet for :"+lrest.Name, "Namespace", replicaSet.Namespace, "Name", replicaSet.Name) + return err + } + } else if err != nil { + log.Error(err, "Replicaset : "+replicaSet.Name+" already exists.") + return err + } + + // Set LREST instance as the owner and controller + ctrl.SetControllerReference(lrest, replicaSet, r.Scheme) + + log.Info("Created LREST ReplicaSet successfully") + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "CreatedLRESTReplicaSet", "Created LREST Replicaset (Replicas - %s) for %s", strconv.Itoa(lrest.Spec.Replicas), lrest.Name) + return nil +} + +/* +************************************************ + - Validate LREST Pod. 
Check if there are any errors + /*********************************************** +*/ +func (r *LRESTReconciler) validateLRESTPods(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("validateLRESTPod", req.NamespacedName) + + log.Info("Validating Pod creation for :" + lrest.Name) + + podName := lrest.Name + "-lrest" + podList := &corev1.PodList{} + listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingLabels{"name": podName}} + + // List retrieves list of objects for a given namespace and list options. + err := r.List(ctx, podList, listOpts...) + if err != nil { + log.Info("Failed to list pods of: "+podName, "Namespace", req.Namespace) + return err + } + + if len(podList.Items) == 0 { + log.Info("No pods found for: "+podName, "Namespace", req.Namespace) + lrest.Status.Msg = "Waiting for LREST Pod(s) to start" + return errors.New("Waiting for LREST pods to start") + } + + getLRESTStatus := " curl --cert /opt/oracle/lrest/certificates/tls.crt --cacert /opt/oracle/lrest/certificates/ca.crt --key /opt/oracle/lrest/certificates/tls.key -u `cat /opt/oracle/lrest/certificates/webserver_user`:`cat /opt/oracle/lrest/certificates/webserver_pwd` -sSkv -k -X GET https://localhost:" + strconv.Itoa(lrest.Spec.LRESTPort) + "/database/pdbs/" + readyPods := 0 + for _, pod := range podList.Items { + if pod.Status.Phase == corev1.PodRunning { + // Get LREST Status + out, err := dbcommons.ExecCommand(r, r.Config, pod.Name, pod.Namespace, "", ctx, req, false, "bash", "-c", getLRESTStatus) + if strings.Contains(out, "HTTP/1.1 200 OK") || strings.Contains(strings.ToUpper(err.Error()), "HTTP/1.1 200 OK") || + strings.Contains(out, "HTTP/2") || strings.Contains(strings.ToUpper(err.Error()), " HTTP/2") { + readyPods++ + } else if strings.Contains(out, "HTTP/1.1 404 Not Found") || strings.Contains(strings.ToUpper(err.Error()), "HTTP/1.1 404 NOT FOUND") || strings.Contains(strings.ToUpper(err.Error()), "HTTP/2 404") || 
strings.Contains(strings.ToUpper(err.Error()), "Failed to connect to localhost") { + // Check if DB connection parameters are correct + getLRESTInstallStatus := " grep -q 'Failed to' /tmp/lrest_install.log; echo $?;" + out, _ := dbcommons.ExecCommand(r, r.Config, pod.Name, pod.Namespace, "", ctx, req, false, "bash", "-c", getLRESTInstallStatus) + if strings.TrimSpace(out) == "0" { + lrest.Status.Msg = "Check DB connection parameters" + lrest.Status.Phase = lrestPhaseFail + // Delete existing ReplicaSet + r.deleteReplicaSet(ctx, req, lrest) + return errors.New("Check DB connection parameters") + } + } + } + } + + if readyPods != lrest.Spec.Replicas { + log.Info("Replicas: "+strconv.Itoa(lrest.Spec.Replicas), "Ready Pods: ", readyPods) + lrest.Status.Msg = "Waiting for LREST Pod(s) to be ready" + return errors.New("Waiting for LREST pods to be ready") + } + + lrest.Status.Msg = "" + return nil +} + +/* +*********************** + - Create Pod spec + +/*********************** +*/ +func (r *LRESTReconciler) createPodSpec(lrest *dbapi.LREST) corev1.PodSpec { + + podSpec := corev1.PodSpec{ + Volumes: []corev1.Volume{{ + Name: "secrets", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + DefaultMode: func() *int32 { i := int32(0666); return &i }(), + Sources: []corev1.VolumeProjection{ + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTPubKey.Secret.SecretName, + }, + Items: []corev1.KeyToPath{ + { + Key: lrest.Spec.LRESTPubKey.Secret.Key, + Path: lrest.Spec.LRESTPubKey.Secret.Key, + }, + }, + }, + }, + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTPriKey.Secret.SecretName, + }, + Items: []corev1.KeyToPath{ + { + Key: lrest.Spec.LRESTPriKey.Secret.Key, + Path: lrest.Spec.LRESTPriKey.Secret.Key, + }, + }, + }, + }, + + /***/ + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: 
corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTTlsKey.Secret.SecretName, + }, + Items: []corev1.KeyToPath{ + { + Key: lrest.Spec.LRESTTlsKey.Secret.Key, + Path: lrest.Spec.LRESTTlsKey.Secret.Key, + }, + }, + }, + }, + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTTlsCrt.Secret.SecretName, + }, + Items: []corev1.KeyToPath{ + { + Key: lrest.Spec.LRESTTlsCrt.Secret.Key, + Path: lrest.Spec.LRESTTlsCrt.Secret.Key, + }, + }, + }, + }, + }, + }, + }, + }}, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: &[]bool{true}[0], + FSGroup: &[]int64{54321}[0], + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + /*InitContainers: []corev1.Container{{ + Image: lrest.Spec.LRESTImage, + Name: lrest.Name + "-init", + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: securityContextDefineLrest(), + Command: []string{"echo test > /opt/oracle/lrest/certificates/tests"}, + Env: func() []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "ORACLE_HOST", + Value: lrest.Spec.DBTnsurl, + }} + }(), + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/opt/oracle/lrest/certificates", + Name: "secrets", + ReadOnly: false, + }}, + }},*/ + Containers: []corev1.Container{{ + Image: lrest.Spec.LRESTImage, + Name: lrest.Name + "-lrest", + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: securityContextDefineLrest(), + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/opt/oracle/lrest/certificates", + Name: "secrets", + ReadOnly: true, + }, + }, + Env: func() []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "ORACLE_HOST", + Value: lrest.Spec.DBServer, + }, + { + Name: "DBTNSURL", + Value: lrest.Spec.DBTnsurl, + }, + { + Name: "TLSCRT", + Value: lrest.Spec.LRESTTlsCrt.Secret.Key, + }, + { + Name: "TLSKEY", + Value: lrest.Spec.LRESTTlsKey.Secret.Key, + }, + { + Name: "PUBKEY", + Value: lrest.Spec.LRESTPubKey.Secret.Key, + }, + 
{ + Name: "PRVKEY", + Value: lrest.Spec.LRESTPriKey.Secret.Key, + }, + { + Name: "ORACLE_PORT", + Value: strconv.Itoa(lrest.Spec.DBPort), + }, + { + Name: "LREST_PORT", + Value: strconv.Itoa(lrest.Spec.LRESTPort), + }, + { + Name: "ORACLE_SERVICE", + Value: lrest.Spec.ServiceName, + }, + { + Name: "R1", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTAdminUser.Secret.SecretName, + }, + Key: lrest.Spec.LRESTAdminUser.Secret.Key, + }, + }, + }, + { + Name: "R2", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.LRESTAdminPwd.Secret.SecretName, + }, + Key: lrest.Spec.LRESTAdminPwd.Secret.Key, + }, + }, + }, + { + Name: "R3", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.WebLrestServerUser.Secret.SecretName, + }, + Key: lrest.Spec.WebLrestServerUser.Secret.Key, + }, + }, + }, + { + Name: "R4", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: lrest.Spec.WebLrestServerPwd.Secret.SecretName, + }, + Key: lrest.Spec.WebLrestServerPwd.Secret.Key, + }, + }, + }, + } + }(), + }}, + + NodeSelector: func() map[string]string { + ns := make(map[string]string) + if len(lrest.Spec.NodeSelector) != 0 { + for key, value := range lrest.Spec.NodeSelector { + ns[key] = value + } + } + return ns + }(), + } + + if len(lrest.Spec.LRESTImagePullSecret) > 0 { + podSpec.ImagePullSecrets = []corev1.LocalObjectReference{ + { + Name: lrest.Spec.LRESTImagePullSecret, + }, + } + } + + podSpec.Containers[0].ImagePullPolicy = corev1.PullAlways + + if len(lrest.Spec.LRESTImagePullPolicy) > 0 { + if strings.ToUpper(lrest.Spec.LRESTImagePullPolicy) == "NEVER" { + podSpec.Containers[0].ImagePullPolicy = 
corev1.PullNever + } + } + + return podSpec +} + +/* +*********************** + - Create ReplicaSet spec + +/*********************** +*/ +func (r *LRESTReconciler) createReplicaSetSpec(lrest *dbapi.LREST) *appsv1.ReplicaSet { + + replicas := int32(lrest.Spec.Replicas) + podSpec := r.createPodSpec(lrest) + + replicaSet := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: lrest.Name + "-lrest-rs", + Namespace: lrest.Namespace, + Labels: map[string]string{ + "name": lrest.Name + "-lrest-rs", + }, + }, + Spec: appsv1.ReplicaSetSpec{ + Replicas: &replicas, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: lrest.Name + "-lrest", + Namespace: lrest.Namespace, + Labels: map[string]string{ + "name": lrest.Name + "-lrest", + }, + }, + Spec: podSpec, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": lrest.Name + "-lrest", + }, + }, + }, + } + + return replicaSet +} + +/* +********************************************************* + - Evaluate change in Spec post creation and instantiation + /******************************************************* +*/ +func (r *LRESTReconciler) deleteReplicaSet(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + log := r.Log.WithValues("deleteReplicaSet", req.NamespacedName) + + k_client, err := kubernetes.NewForConfig(r.Config) + if err != nil { + log.Error(err, "Kubernetes Config Error") + return err + } + + replicaSetName := lrest.Name + "-lrest-rs" + err = k_client.AppsV1().ReplicaSets(lrest.Namespace).Delete(context.TODO(), replicaSetName, metav1.DeleteOptions{}) + if err != nil { + log.Info("Could not delete ReplicaSet", "RS Name", replicaSetName, "err", err.Error()) + if !strings.Contains(strings.ToUpper(err.Error()), "NOT FOUND") { + return err + } + } else { + log.Info("Successfully deleted LREST ReplicaSet", "RS Name", replicaSetName) + } + + return nil +} + +/* +********************************************************* + - Evaluate change in Spec 
post creation and instantiation + /******************************************************* +*/ +func (r *LRESTReconciler) evaluateSpecChange(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + log := r.Log.WithValues("evaluateSpecChange", req.NamespacedName) + + // List the Pods matching the PodTemplate Labels + podName := lrest.Name + "-lrest" + podList := &corev1.PodList{} + listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingLabels{"name": podName}} + + // List retrieves list of objects for a given namespace and list options. + err := r.List(ctx, podList, listOpts...) + if err != nil { + log.Info("Failed to list pods of: "+podName, "Namespace", req.Namespace) + return err + } + + var foundPod corev1.Pod + for _, pod := range podList.Items { + foundPod = pod + break + } + + lrestSpecChange := false + for _, envVar := range foundPod.Spec.Containers[0].Env { + if envVar.Name == "ORACLE_HOST" && envVar.Value != lrest.Spec.DBServer { + lrestSpecChange = true + } else if envVar.Name == "ORACLE_PORT" && envVar.Value != strconv.Itoa(lrest.Spec.DBPort) { + lrestSpecChange = true + } else if envVar.Name == "LREST_PORT" && envVar.Value != strconv.Itoa(lrest.Spec.LRESTPort) { + lrestSpecChange = true + } else if envVar.Name == "ORACLE_SERVICE" && envVar.Value != lrest.Spec.ServiceName { + lrestSpecChange = true + } + } + + if lrestSpecChange { + // Delete existing ReplicaSet + err = r.deleteReplicaSet(ctx, req, lrest) + if err != nil { + return err + } + + lrest.Status.Phase = lrestPhaseInit + lrest.Status.Status = false + r.Status().Update(ctx, lrest) + } else { + // Update the RS if the value of "replicas" is changed + replicaSetName := lrest.Name + "-lrest-rs" + + foundRS := &appsv1.ReplicaSet{} + err := r.Get(context.TODO(), types.NamespacedName{Name: replicaSetName, Namespace: lrest.Namespace}, foundRS) + if err != nil { + log.Error(err, "Unable to get LREST Replicaset: "+replicaSetName) + return err + } + + // Check if 
number of replicas have changed + replicas := int32(lrest.Spec.Replicas) + if lrest.Spec.Replicas != int(*(foundRS.Spec.Replicas)) { + log.Info("Existing Replicas: " + strconv.Itoa(int(*(foundRS.Spec.Replicas))) + ", New Replicas: " + strconv.Itoa(lrest.Spec.Replicas)) + foundRS.Spec.Replicas = &replicas + err = r.Update(ctx, foundRS) + if err != nil { + log.Error(err, "Failed to update ReplicaSet for :"+lrest.Name, "Namespace", lrest.Namespace, "Name", replicaSetName) + return err + } + lrest.Status.Phase = lrestPhaseValPod + lrest.Status.Status = false + r.Status().Update(ctx, lrest) + } + } + + return nil +} + +/* +************************************************ + - Create a Cluster Service for LREST LREST Pod + /*********************************************** +*/ +func (r *LRESTReconciler) createLRESTSVC(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("createLRESTSVC", req.NamespacedName) + + foundSvc := &corev1.Service{} + err := r.Get(context.TODO(), types.NamespacedName{Name: lrest.Name + "-lrest", Namespace: lrest.Namespace}, foundSvc) + if err != nil && apierrors.IsNotFound(err) { + svc := r.createSvcSpec(lrest) + + log.Info("Creating a new Cluster Service for: "+lrest.Name, "Svc.Namespace", svc.Namespace, "Service.Name", svc.Name) + err := r.Create(ctx, svc) + if err != nil { + log.Error(err, "Failed to create new Cluster Service for: "+lrest.Name, "Svc.Namespace", svc.Namespace, "Service.Name", svc.Name) + return err + } + + log.Info("Created LREST Cluster Service successfully") + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "CreatedLRESTService", "Created LREST Service for %s", lrest.Name) + } else { + log.Info("LREST Cluster Service already exists") + } + + return nil +} + +/* +*********************** + - Create Service spec + /*********************** +*/ +func (r *LRESTReconciler) createSvcSpec(lrest *dbapi.LREST) *corev1.Service { + + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: 
"Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: lrest.Name + "-lrest", + Namespace: lrest.Namespace, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "name": lrest.Name + "-lrest", + }, + ClusterIP: corev1.ClusterIPNone, + }, + } + // Set LREST instance as the owner and controller + ctrl.SetControllerReference(lrest, svc, r.Scheme) + return svc +} + +/* +************************************************ + - Check LREST deletion + /*********************************************** +*/ + +func (r *LRESTReconciler) manageLRESTDeletion(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + log := r.Log.WithValues("manageLRESTDeletion", req.NamespacedName) + + /* REGISTER FINALIZER */ + if lrest.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(lrest, LRESTFinalizer) { + controllerutil.AddFinalizer(lrest, LRESTFinalizer) + if err := r.Update(ctx, lrest); err != nil { + return err + } + } + + } else { + log.Info("lrest set to be deleted") + lrest.Status.Phase = lrestPhaseDelete + lrest.Status.Status = true + r.Status().Update(ctx, lrest) + + if controllerutil.ContainsFinalizer(lrest, LRESTFinalizer) { + + if err := r.DeletePDBS(ctx, req, lrest); err != nil { + log.Info("Cannot delete lrpdbs") + return err + } + + controllerutil.RemoveFinalizer(lrest, LRESTFinalizer) + if err := r.Update(ctx, lrest); err != nil { + return err + } + } + + err := r.deleteLRESTInstance(ctx, req, lrest) + if err != nil { + log.Info("Could not delete LREST Resource", "LREST Name", lrest.Spec.LRESTName, "err", err.Error()) + return err + } + + } + return nil +} + +/* +************************************************ + - Delete LREST Resource + +/*********************************************** +*/ +func (r *LRESTReconciler) deleteLRESTInstance(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("deleteLRESTInstance", req.NamespacedName) + + k_client, err := 
kubernetes.NewForConfig(r.Config) + if err != nil { + log.Error(err, "Kubernetes Config Error") + } + + replicaSetName := lrest.Name + "-lrest-rs" + + err = k_client.AppsV1().ReplicaSets(lrest.Namespace).Delete(context.TODO(), replicaSetName, metav1.DeleteOptions{}) + if err != nil { + log.Info("Could not delete ReplicaSet", "RS Name", replicaSetName, "err", err.Error()) + if !strings.Contains(strings.ToUpper(err.Error()), "NOT FOUND") { + return err + } + } else { + log.Info("Successfully deleted LREST ReplicaSet", "RS Name", replicaSetName) + } + + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "DeletedLRESTReplicaSet", "Deleted LREST ReplicaSet for %s", lrest.Name) + + svcName := lrest.Name + "-lrest" + + err = k_client.CoreV1().Services(lrest.Namespace).Delete(context.TODO(), svcName, metav1.DeleteOptions{}) + if err != nil { + log.Info("Could not delete Service", "Service Name", svcName, "err", err.Error()) + if !strings.Contains(strings.ToUpper(err.Error()), "NOT FOUND") { + return err + } + } else { + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "DeletedLRESTService", "Deleted LREST Service for %s", lrest.Name) + log.Info("Successfully deleted LREST Service", "Service Name", svcName) + } + + log.Info("Successfully deleted LREST resource", "LREST Name", lrest.Spec.LRESTName) + return nil +} + +/* +************************************************ + - Get Secret Key for a Secret Name + /*********************************************** +*/ +func (r *LRESTReconciler) verifySecrets(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + + log := r.Log.WithValues("verifySecrets", req.NamespacedName) + /* + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.SysAdminPwd.Secret.SecretName); err != nil { + return err + }*/ + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.LRESTAdminUser.Secret.SecretName); err != nil { + return err + } + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.LRESTAdminPwd.Secret.SecretName); err != nil { + return 
err + } + /* + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.LRESTPwd.Secret.SecretName); err != nil { + return err + }*/ + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.WebLrestServerUser.Secret.SecretName); err != nil { + return err + } + if err := r.checkSecret(ctx, req, lrest, lrest.Spec.WebLrestServerPwd.Secret.SecretName); err != nil { + return err + } + + lrest.Status.Msg = "" + log.Info("Verified secrets successfully") + return nil +} + +/* +************************************************ + - Get Secret Key for a Secret Name + /*********************************************** +*/ +func (r *LRESTReconciler) checkSecret(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST, secretName string) error { + + log := r.Log.WithValues("checkSecret", req.NamespacedName) + + secret := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: lrest.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + secretName) + lrest.Status.Msg = "Secret not found:" + secretName + return err + } + log.Error(err, "Unable to get the secret.") + return err + } + + return nil +} + +/* +************************************************ + - Delete Secrets + /*********************************************** +*/ +func (r *LRESTReconciler) deleteSecrets(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) { + + log := r.Log.WithValues("deleteSecrets", req.NamespacedName) + + log.Info("Deleting LREST secrets") + secret := &corev1.Secret{} + /* + err := r.Get(ctx, types.NamespacedName{Name: lrest.Spec.SysAdminPwd.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.SysAdminPwd.Secret.SecretName) + } + } + */ + + err := r.Get(ctx, types.NamespacedName{Name: lrest.Spec.LRESTAdminUser.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, 
secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.LRESTAdminUser.Secret.SecretName) + } + } + + err = r.Get(ctx, types.NamespacedName{Name: lrest.Spec.LRESTAdminPwd.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.LRESTAdminPwd.Secret.SecretName) + } + } + /* + err = r.Get(ctx, types.NamespacedName{Name: lrest.Spec.LRESTPwd.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.LRESTPwd.Secret.SecretName) + } + } + */ + + err = r.Get(ctx, types.NamespacedName{Name: lrest.Spec.WebLrestServerUser.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.WebLrestServerUser.Secret.SecretName) + } + } + + err = r.Get(ctx, types.NamespacedName{Name: lrest.Spec.WebLrestServerPwd.Secret.SecretName, Namespace: lrest.Namespace}, secret) + if err == nil { + err := r.Delete(ctx, secret) + if err == nil { + log.Info("Deleted the secret : " + lrest.Spec.WebLrestServerPwd.Secret.SecretName) + } + } +} + +/* +************************************************************* + - SetupWithManager sets up the controller with the Manager. + /************************************************************ +*/ +func (r *LRESTReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.LREST{}). + Owns(&appsv1.ReplicaSet{}). 
//Watch for deleted RS owned by this controller + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Ignore updates to CR status in which case metadata.Generation does not change + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + //return !e.DeleteStateUnknown + return false + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 100}). + Complete(r) +} + +func securityContextDefineLrest() *corev1.SecurityContext { + return &corev1.SecurityContext{ + RunAsNonRoot: &[]bool{true}[0], + RunAsUser: &[]int64{54321}[0], + AllowPrivilegeEscalation: &[]bool{false}[0], + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{ + "ALL", + }, + }, + } +} + +func (r *LRESTReconciler) DeletePDBS(ctx context.Context, req ctrl.Request, lrest *dbapi.LREST) error { + log := r.Log.WithValues("DeletePDBS", req.NamespacedName) + + /* =================== DELETE CASCADE ================ */ + if lrest.Spec.DeletePDBCascade == true { + log.Info("DELETE PDB CASCADE OPTION") + lrpdbList := &dbapi.LRPDBList{} + listOpts := []client.ListOption{} + err := r.List(ctx, lrpdbList, listOpts...) + if err != nil { + log.Info("Failed to get the list of pdbs") + } + + if err == nil { + for _, pdbitem := range lrpdbList.Items { + log.Info("pdbitem.Spec.CDBName:" + pdbitem.Spec.CDBName) + log.Info("lrest.Spec.LRESTName:" + lrest.Spec.LRESTName) + if pdbitem.Spec.CDBName == lrest.Spec.LRESTName { + fmt.Printf("DEVPHASE: Call Delete function for %s %s\n", pdbitem.Name, pdbitem.Spec.LRPDBName) + + var objmap map[string]interface{} /* Used for the return payload */ + values := map[string]string{ + "state": "CLOSE", + "modifyOption": "ABORT", + } + + url := "https://" + pdbitem.Spec.CDBResName + "-lrest." 
+ pdbitem.Spec.CDBNamespace + ":" + strconv.Itoa(lrest.Spec.LRESTPort) + "/database/pdbs/" + pdbitem.Spec.LRPDBName + + log.Info("callAPI(URL):" + url) + log.Info("pdbitem.Status.OpenMode" + pdbitem.Status.OpenMode) + + if pdbitem.Status.OpenMode != "MOUNTED" { + + log.Info("Force pdb closure") + respData, errapi := NewCallLAPI(r, ctx, req, &pdbitem, url, values, "POST") + + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "failed to get respData from callAPI", "err", err.Error()) + return err + } + + pdbitem.Status.SqlCode = int(objmap["sqlcode"].(float64)) + log.Info("pdb closure.......:", "sqlcode", pdbitem.Status.SqlCode) + + if errapi != nil { + log.Error(err, "callAPI cannot close pdb "+pdbitem.Spec.LRPDBName, "err", err.Error()) + return err + } + + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "close pdb", "pdbname=%s", pdbitem.Spec.LRPDBName) + } + + /* start dropping pdb */ + log.Info("Drop pluggable database") + values = map[string]string{ + "action": "INCLUDING", + } + respData, errapi := NewCallLAPI(r, ctx, req, &pdbitem, url, values, "DELETE") + + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "failed to get respData from callAPI", "err", err.Error()) + return err + } + + pdbitem.Status.SqlCode = int(objmap["sqlcode"].(float64)) + log.Info(".......:", "sqlcode", pdbitem.Status.SqlCode) + + if errapi != nil { + log.Error(err, "callAPI cannot drop pdb "+pdbitem.Spec.LRPDBName, "err", err.Error()) + return err + } + r.Recorder.Eventf(lrest, corev1.EventTypeNormal, "drop pdb", "pdbname=%s", pdbitem.Spec.LRPDBName) + + /* remove finalizer */ + + if controllerutil.ContainsFinalizer(&pdbitem, LRPDBFinalizer) { + log.Info("Removing finalizer") + controllerutil.RemoveFinalizer(&pdbitem, LRPDBFinalizer) + err = r.Update(ctx, &pdbitem) + if err != nil { + log.Info("Could not remove finalizer", "err", err.Error()) + return err + } + } + + err = r.Delete(context.Background(), &pdbitem, 
client.GracePeriodSeconds(1)) + if err != nil { + log.Info("Could not delete LRPDB resource", "err", err.Error()) + return err + } + + } /* check pdb name */ + } /* end of loop */ + } + + } + /* ================================================ */ + return nil +} diff --git a/controllers/database/lrpdb_controller.go b/controllers/database/lrpdb_controller.go new file mode 100644 index 00000000..1aadf65b --- /dev/null +++ b/controllers/database/lrpdb_controller.go @@ -0,0 +1,2381 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "bytes" + "context" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + + //"encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + "github.com/oracle/oracle-database-operator/commons/k8s" + lrcommons "github.com/oracle/oracle-database-operator/commons/multitenant/lrest" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// Bitmask functions +const ( + MPAPPL = 0x00000001 /* The map config has been applyed */ + MPSYNC = 0x00000002 /* The map config is in sync with v$parameters where is default=flase */ + MPEMPT = 0x00000004 /* The map is empty - not specify */ + MPWARN = 0x00000008 /* Map applied with warnings */ + MPINIT = 0x00000010 /* Config map init */ + SPARE3 = 
0x00000020 +) + +func bis(bitmask int, bitval int) int { + bitmask = ((bitmask) | (bitval)) + return bitmask +} + +func bit(bitmask int, bitval int) bool { + if bitmask&bitval != 0 { + return true + } else { + return false + } +} + +func bid(bitmask int, bitval int) int { + bitmask ^= ((bitval) & (bitmask)) + return bitmask +} + +func bitmaskprint(bitmask int) string { + BitRead := "|" + if bit(bitmask, MPAPPL) { + BitRead = strings.Join([]string{BitRead, "MPAPPL|"}, "") + } + if bit(bitmask, MPSYNC) { + BitRead = strings.Join([]string{BitRead, "MPSYNC|"}, "") + } + if bit(bitmask, MPEMPT) { + BitRead = strings.Join([]string{BitRead, "MPEMPT|"}, "") + } + if bit(bitmask, MPWARN) { + BitRead = strings.Join([]string{BitRead, "MPWARN|"}, "") + } + if bit(bitmask, MPINIT) { + BitRead = strings.Join([]string{BitRead, "MPINIT|"}, "") + } + if bit(bitmask, SPARE3) { + BitRead = strings.Join([]string{BitRead, "SPARE3|"}, "") + } + + return BitRead +} + +// LRPDBReconciler reconciles a LRPDB object +type LRPDBReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Interval time.Duration + Recorder record.EventRecorder +} + +type restSQLCollection struct { + Env struct { + DefaultTimeZone string `json:"defaultTimeZone,omitempty"` + } `json:"env"` + Items []SQL_Item `json:"items"` +} + +type SQL_Item struct { + StatementId int `json:"statementId,omitempty"` + Response []string `json:"response"` + ErrorCode int `json:"errorCode,omitempty"` + ErrorLine int `json:"errorLine,omitempty"` + ErrorColumn int `json:"errorColumn,omitempty"` + ErrorDetails string `json:"errorDetails,omitempty"` + Result int `json:"result,omitempty"` +} + +type LRESTError struct { + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` + Type string `json:"type,omitempty"` + Instance string `json:"instance,omitempty"` +} + +var ( + lrpdbPhaseCreate = "Creating" + lrpdbPhasePlug = "Plugging" + lrpdbPhaseUnplug = "Unplugging" + lrpdbPhaseClone = "Cloning" 
+ lrpdbPhaseFinish = "Finishing" + lrpdbPhaseReady = "Ready" + lrpdbPhaseDelete = "Deleting" + lrpdbPhaseModify = "Modifying" + lrpdbPhaseMap = "Mapping" + lrpdbPhaseStatus = "CheckingState" + lrpdbPhaseFail = "Failed" + lrpdbPhaseAlterPlug = "AlterPlugDb" + lrpdbPhaseSpare = "NoAction" +) + +const LRPDBFinalizer = "database.oracle.com/LRPDBfinalizer" + +var tde_Password string +var tde_Secret string +var flood_control bool = false +var assertiveLpdbDeletion bool = false /* Global variable for assertive pdb deletion */ +/* + We need to record the config map name after pdb creation + in order to use it during open and clone op if config map + name is not set the open and clone yaml file +*/ +var globalconfigmap string +var globalsqlcode int + +/* mind https://github.com/kubernetes-sigs/kubebuilder/issues/549 */ +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrpdbs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=events,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrpdbs/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=lrpdbs/finalizers,verbs=get;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the LRPDB object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile +func (r *LRPDBReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("multitenantoperator", req.NamespacedName) + log.Info("Reconcile requested") + + reconcilePeriod := r.Interval * time.Second + requeueY := ctrl.Result{Requeue: true, RequeueAfter: reconcilePeriod} + requeueN := ctrl.Result{} + + var err error + lrpdb := &dbapi.LRPDB{} + + // Execute for every reconcile + defer func() { + //log.Info("DEFER LRPDB", "Name", lrpdb.Name, "Phase", lrpdb.Status.Phase, "Status", strconv.FormatBool(lrpdb.Status.Status)) + if !lrpdb.Status.Status { + if lrpdb.Status.Phase == lrpdbPhaseReady { + lrpdb.Status.Status = true + } + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + } + }() + + err = r.Client.Get(context.TODO(), req.NamespacedName, lrpdb) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("LRPDB Resource Not found", "Name", lrpdb.Name) + // Request object not found, could have been deleted after reconcile req. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + lrpdb.Status.Status = true + return requeueN, nil + } + // Error reading the object - requeue the req. 
+ return requeueY, err + } + + // Finalizer section + err = r.manageLRPDBDeletion2(ctx, req, lrpdb) + if err != nil { + log.Info("Reconcile queued") + return requeueY, nil + } + + // Check for Duplicate LRPDB + if !lrpdb.Status.Status { + err = r.checkDuplicateLRPDB(ctx, req, lrpdb) + if err != nil { + return requeueN, nil + } + } + + action := strings.ToUpper(lrpdb.Spec.Action) + /* + Bug 36714702 - LREST OPERATOR - POST ALTER PDB OPTION LRPDB STATUS INTERMITTENTLY + SHOWS "WAITING FOR LRPDB PARAMETER TO BE MODIFIED" + introducing additional check to avoid alter system repetition during + reconciliation loop + */ + if lrpdb.Status.Phase == lrpdbPhaseReady { + if (lrpdb.Status.Action != "" || action != "NOACTION") && (action == "ALTER" || action == "MODIFY" || action == "STATUS" || lrpdb.Status.Action != action) { + lrpdb.Status.Status = false + } else { + err = r.getLRPDBState(ctx, req, lrpdb) + if err != nil { + lrpdb.Status.Phase = lrpdbPhaseFail + } else { + lrpdb.Status.Phase = lrpdbPhaseReady + lrpdb.Status.Msg = "Success" + } + r.Status().Update(ctx, lrpdb) + } + } + + if !lrpdb.Status.Status { + r.validatePhase(ctx, req, lrpdb) + phase := lrpdb.Status.Phase + log.Info("LRPDB:", "Name", lrpdb.Name, "Phase", phase, "Status", strconv.FormatBool(lrpdb.Status.Status)) + + switch phase { + case lrpdbPhaseCreate: + err = r.createLRPDB(ctx, req, lrpdb) + case lrpdbPhaseClone: + err = r.cloneLRPDB(ctx, req, lrpdb) + case lrpdbPhasePlug: + err = r.plugLRPDB(ctx, req, lrpdb) + case lrpdbPhaseUnplug: + err = r.unplugLRPDB(ctx, req, lrpdb) + case lrpdbPhaseModify: + err = r.modifyLRPDB(ctx, req, lrpdb) + case lrpdbPhaseDelete: + err = r.deleteLRPDB(ctx, req, lrpdb) + case lrpdbPhaseStatus: + err = r.getLRPDBState(ctx, req, lrpdb) + case lrpdbPhaseMap: + err = r.mapLRPDB(ctx, req, lrpdb) + case lrpdbPhaseFail: + err = r.mapLRPDB(ctx, req, lrpdb) + case lrpdbPhaseAlterPlug: + err = r.alterSystemLRPDB(ctx, req, lrpdb) + default: + log.Info("DEFAULT:", "Name", lrpdb.Name, 
"Phase", phase, "Status", strconv.FormatBool(lrpdb.Status.Status)) + return requeueN, nil + } + lrpdb.Status.Action = strings.ToUpper(lrpdb.Spec.Action) + if err != nil { + lrpdb.Status.Phase = lrpdbPhaseFail + lrpdb.Status.SqlCode = globalsqlcode + } else { + lrpdb.Status.Phase = lrpdbPhaseReady + lrpdb.Status.Msg = "Success" + } + } + + r.ManageConfigMapForCloningAndPlugin(ctx, req, lrpdb) + lrpdb.Status.BitStatStr = bitmaskprint(lrpdb.Status.Bitstat) + + log.Info("Reconcile completed") + return requeueY, nil +} + +/* +************************************************ + - Validate the LRPDB Spec + /*********************************************** +*/ +func (r *LRPDBReconciler) validatePhase(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) { + + log := r.Log.WithValues("validatePhase", req.NamespacedName) + + action := strings.ToUpper(lrpdb.Spec.Action) + + log.Info("Validating LRPDB phase for: "+lrpdb.Name, "Action", action) + + switch action { + case "CREATE": + lrpdb.Status.Phase = lrpdbPhaseCreate + case "CLONE": + lrpdb.Status.Phase = lrpdbPhaseClone + case "PLUG": + lrpdb.Status.Phase = lrpdbPhasePlug + case "UNPLUG": + lrpdb.Status.Phase = lrpdbPhaseUnplug + case "MODIFY": + lrpdb.Status.Phase = lrpdbPhaseModify + case "DELETE": + lrpdb.Status.Phase = lrpdbPhaseDelete + case "STATUS": + lrpdb.Status.Phase = lrpdbPhaseStatus + case "MAP": + lrpdb.Status.Phase = lrpdbPhaseMap + case "ALTER": + lrpdb.Status.Phase = lrpdbPhaseAlterPlug + case "NOACTION": + lrpdb.Status.Phase = lrpdbPhaseStatus + + } + + log.Info("Validation complete") +} + +/* + This function scans the list of crd + pdb to verify the existence of the + pdb (crd) that we want to clone. 
+ Bug 36752925 - LREST OPERATOR - CLONE NON-EXISTENT
+ PDB CREATES A LRPDB WITH STATUS FAILED
+
+ return 1 - CRD found
+ return 0 - CRD not found / Stop clone process
+
+ Bug 36753107 - LREST OPERATOR - CLONE
+ CLOSED PDB SUCCESSFULLY CLONES
+
+*/
+
+// checkPDBforCloninig counts LRPDB CRs in this namespace whose LRPDBName
+// matches targetPdbName. Returns 0 if the source pdb CR does not exist or
+// is only MOUNTED (cloning must stop), 1 when a usable source is found.
+func (r *LRPDBReconciler) checkPDBforCloninig(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, targetPdbName string) (int, error) {
+	// Log tag fixed: previously carried "checkDuplicateLRPDB", copied from
+	// the sibling function, which mislabelled every line this emitted.
+	log := r.Log.WithValues("checkPDBforCloninig", req.NamespacedName)
+	pdbCounter := 0
+
+	lrpdbList := &dbapi.LRPDBList{}
+	listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingFields{"spec.pdbName": targetPdbName}}
+	err := r.List(ctx, lrpdbList, listOpts...)
+	if err != nil {
+		log.Info("Failed to list lrpdbs", "Namespace", req.Namespace, "Error", err)
+		return 0, err
+	}
+	if len(lrpdbList.Items) == 0 {
+		log.Info("No pdbs available")
+		return pdbCounter, err
+	}
+
+	for _, p := range lrpdbList.Items {
+		// %d, not %i: %i is not a Go fmt verb and printed "%!i(int=...)".
+		fmt.Printf("DEBUGCLONE %s %s %d\n", p.Spec.LRPDBName, targetPdbName, pdbCounter)
+		if p.Spec.LRPDBName == targetPdbName {
+			log.Info("Found " + targetPdbName + " in the crd list")
+			if p.Status.OpenMode == "MOUNTED" {
+				log.Info("Cannot clone a mounted pdb")
+				return pdbCounter, err
+			}
+			pdbCounter++
+			fmt.Printf("DEBUGCLONE %s %s %d\n", p.Spec.LRPDBName, targetPdbName, pdbCounter)
+			return pdbCounter, err
+		}
+
+	}
+	return pdbCounter, err
+}
+
+/*
+***************************************************************
+ - Check for Duplicate LRPDB. Same LRPDB name on the same LREST resource.
+ +/************************************************************** +*/ +func (r *LRPDBReconciler) checkDuplicateLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("checkDuplicateLRPDB", req.NamespacedName) + + // Name of the LREST CR that holds the LREST container + lrestResName := lrpdb.Spec.CDBResName + //lrestame := lrpdb.Spec.LRESTName + + // Name of the LRPDB resource + lrpdbResName := lrpdb.Spec.LRPDBName + + lrpdbList := &dbapi.LRPDBList{} + + listOpts := []client.ListOption{client.InNamespace(req.Namespace), client.MatchingFields{"spec.pdbName": lrpdbResName}} + + // List retrieves list of objects for a given namespace and list options. + err := r.List(ctx, lrpdbList, listOpts...) + if err != nil { + log.Info("Failed to list lrpdbs", "Namespace", req.Namespace, "Error", err) + return err + } + + if len(lrpdbList.Items) == 0 { + log.Info("No lrpdbs found for LRPDBName: "+lrpdbResName, "CDBResName", lrestResName) + return nil + } + + for _, p := range lrpdbList.Items { + log.Info("Found LRPDB: " + p.Name) + if (p.Name != lrpdb.Name) && (p.Spec.CDBResName == lrestResName) { + log.Info("Duplicate LRPDB found") + lrpdb.Status.Msg = "LRPDB Resource already exists" + lrpdb.Status.Status = false + lrpdb.Status.Phase = lrpdbPhaseFail + return errors.New("Duplicate LRPDB found") + } + } + return nil +} + +/* +*************************************************************** + - Get the Custom Resource for the LREST mentioned in the LRPDB Spec + /************************************************************** +*/ +func (r *LRPDBReconciler) getLRESTResource(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) (dbapi.LREST, error) { + + log := r.Log.WithValues("getLRESTResource", req.NamespacedName) + + var lrest dbapi.LREST // LREST CR corresponding to the LREST name specified in the LRPDB spec + + // Name of the LREST CR that holds the LREST container + lrestResName := lrpdb.Spec.CDBResName + lrestNamespace := 
lrpdb.Spec.CDBNamespace + + log.Info("lrestResName...........:" + lrestResName) + log.Info("lrestNamespace.........:" + lrestNamespace) + + // Get LREST CR corresponding to the LREST name specified in the LRPDB spec + err := r.Get(context.Background(), client.ObjectKey{ + Namespace: lrestNamespace, + Name: lrestResName, + }, &lrest) + + if err != nil { + log.Info("Failed to get CRD for LREST", "Name", lrestResName, "Namespace", lrestNamespace, "Error", err.Error()) + lrpdb.Status.Msg = "Unable to get CRD for LREST : " + lrestResName + r.Status().Update(ctx, lrpdb) + return lrest, err + } + + log.Info("Found CR for LREST", "Name", lrestResName, "CR Name", lrest.Name) + return lrest, nil +} + +/* +*************************************************************** + - Get the LREST Pod for the LREST mentioned in the LRPDB Spec + /************************************************************** +*/ +func (r *LRPDBReconciler) getLRESTPod(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) (corev1.Pod, error) { + + log := r.Log.WithValues("getLRESTPod", req.NamespacedName) + + var lrestPod corev1.Pod // LREST Pod container with connection to the concerned LREST + + // Name of the LREST CR that holds the LREST container + lrestResName := lrpdb.Spec.CDBResName + + // Get LREST Pod associated with the LREST Name specified in the LRPDB Spec + err := r.Get(context.Background(), client.ObjectKey{ + Namespace: req.Namespace, + Name: lrestResName + "-lrest", + }, &lrestPod) + + if err != nil { + log.Info("Failed to get Pod for LREST", "Name", lrestResName, "Namespace", req.Namespace, "Error", err.Error()) + lrpdb.Status.Msg = "Unable to get LREST Pod for LREST : " + lrestResName + return lrestPod, err + } + + log.Info("Found LREST Pod for LREST", "Name", lrestResName, "Pod Name", lrestPod.Name, "LREST Container hostname", lrestPod.Spec.Hostname) + return lrestPod, nil +} + +/* +************************************************ + - Get Secret Key for a Secret Name + 
/*********************************************** +*/ +func (r *LRPDBReconciler) getSecret(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, secretName string, keyName string) (string, error) { + + log := r.Log.WithValues("getSecret", req.NamespacedName) + + secret := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: lrpdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + secretName) + lrpdb.Status.Msg = "Secret not found:" + secretName + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + return string(secret.Data[keyName]), nil +} + +/* +************************************************ + - Issue a REST API Call to the LREST container + /*********************************************** +*/ +func (r *LRPDBReconciler) callAPI(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, url string, payload map[string]string, action string) (string, error) { + log := r.Log.WithValues("callAPI", req.NamespacedName) + + var err error + + secret := &corev1.Secret{} + + err = r.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsKey.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsKey.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + rsaKeyPEM := secret.Data[lrpdb.Spec.LRPDBTlsKey.Secret.Key] + + err = r.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsCrt.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsCrt.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + rsaCertPEM := secret.Data[lrpdb.Spec.LRPDBTlsCrt.Secret.Key] + + err = r.Get(ctx, types.NamespacedName{Name: 
lrpdb.Spec.LRPDBTlsCat.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsCat.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + caCert := secret.Data[lrpdb.Spec.LRPDBTlsCat.Secret.Key] + /* + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(rsaKeyPEM)) + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(rsaCertPEM)) + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(caCert)) + */ + + certificate, err := tls.X509KeyPair([]byte(rsaCertPEM), []byte(rsaKeyPEM)) + if err != nil { + lrpdb.Status.Msg = "Error tls.X509KeyPair" + return "", err + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + /* + tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate}, + RootCAs: caCertPool} + */ + tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate}, + RootCAs: caCertPool, + //MinVersion: tls.VersionTLS12, + CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + tr := &http.Transport{TLSClientConfig: tlsConf} + + httpclient := &http.Client{Transport: tr} + + log.Info("Issuing REST call", "URL", url, "Action", action) + + webUser, err := r.getEncriptedSecret(ctx, req, lrpdb, lrpdb.Spec.WebLrpdbServerUser.Secret.SecretName, lrpdb.Spec.WebLrpdbServerUser.Secret.Key, lrpdb.Spec.LRPDBPriKey.Secret.SecretName, lrpdb.Spec.LRPDBPriKey.Secret.Key) + if err != nil { + log.Error(err, "Unable to get webuser account name ") + return "", err + } + + webUserPwd, err := r.getEncriptedSecret(ctx, req, lrpdb, lrpdb.Spec.WebLrpdbServerPwd.Secret.SecretName, 
lrpdb.Spec.WebLrpdbServerPwd.Secret.Key, lrpdb.Spec.LRPDBPriKey.Secret.SecretName, lrpdb.Spec.LRPDBPriKey.Secret.Key) + if err != nil { + log.Error(err, "Unable to get webuser account password ") + return "", err + } + + var httpreq *http.Request + if action == "GET" { + httpreq, err = http.NewRequest(action, url, nil) + } else { + jsonValue, _ := json.Marshal(payload) + httpreq, err = http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + } + + if err != nil { + log.Info("Unable to create HTTP Request for LRPDB : "+lrpdb.Name, "err", err.Error()) + return "", err + } + + httpreq.Header.Add("Accept", "application/json") + httpreq.Header.Add("Content-Type", "application/json") + httpreq.SetBasicAuth(webUser, webUserPwd) + + resp, err := httpclient.Do(httpreq) + if err != nil { + errmsg := err.Error() + log.Error(err, "Failed - Could not connect to LREST Pod", "err", err.Error()) + lrpdb.Status.Msg = "Error: Could not connect to LREST Pod" + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTError", errmsg) + return "", err + } + + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "Done", lrpdb.Spec.CDBResName) + if resp.StatusCode != http.StatusOK { + bb, _ := ioutil.ReadAll(resp.Body) + + if resp.StatusCode == 404 { + lrpdb.Status.ConnString = "" + lrpdb.Status.Msg = lrpdb.Spec.LRPDBName + " not found" + + } else { + if flood_control == false { + lrpdb.Status.Msg = "LREST Error - HTTP Status Code:" + strconv.Itoa(resp.StatusCode) + } + } + + if flood_control == false { + log.Info("LREST Error - HTTP Status Code :"+strconv.Itoa(resp.StatusCode), "Err", string(bb)) + } + + var apiErr LRESTError + json.Unmarshal([]byte(bb), &apiErr) + if flood_control == false { + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTError", "Failed: %s", apiErr.Message) + } + fmt.Printf("\n================== APIERR ======================\n") + fmt.Printf("%+v \n", apiErr) + fmt.Printf(string(bb)) + fmt.Printf("URL=%s\n", url) + fmt.Printf("resp.StatusCode=%s\n", 
strconv.Itoa(resp.StatusCode)) + fmt.Printf("\n================== APIERR ======================\n") + flood_control = true + return "", errors.New("LREST Error") + } + flood_control = false + + defer resp.Body.Close() + + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + fmt.Print(err.Error()) + } + respData := string(bodyBytes) + fmt.Print("CALL API return msg.....:") + fmt.Println(string(bodyBytes)) + + var apiResponse restSQLCollection + json.Unmarshal([]byte(bodyBytes), &apiResponse) + fmt.Printf("===> %#v\n", apiResponse) + fmt.Printf("===> %+v\n", apiResponse) + + errFound := false + for _, sqlItem := range apiResponse.Items { + if sqlItem.ErrorDetails != "" { + log.Info("LREST Error - Oracle Error Code :" + strconv.Itoa(sqlItem.ErrorCode)) + if !errFound { + lrpdb.Status.Msg = sqlItem.ErrorDetails + } + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "OraError", "%s", sqlItem.ErrorDetails) + errFound = true + } + } + + if errFound { + return "", errors.New("Oracle Error") + } + + return respData, nil +} + +/* +************************************************ + - Create a LRPDB + +*********************************************** +*/ +func (r *LRPDBReconciler) createLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("createLRPDB", req.NamespacedName) + + var err error + var tde_Password string + var tde_Secret string + + log.Info("call getLRESTResource \n") + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + lrpdbAdminName, err := r.getEncriptedSecret(ctx, req, lrpdb, lrpdb.Spec.AdminpdbUser.Secret.SecretName, lrpdb.Spec.AdminpdbUser.Secret.Key, lrpdb.Spec.LRPDBPriKey.Secret.SecretName, lrpdb.Spec.LRPDBPriKey.Secret.Key) + if err != nil { + log.Error(err, "Unable to find pdb admin user ") + return err + } + + lrpdbAdminPwd, err := r.getEncriptedSecret(ctx, req, lrpdb, lrpdb.Spec.AdminpdbPass.Secret.SecretName, lrpdb.Spec.AdminpdbPass.Secret.Key, 
lrpdb.Spec.LRPDBPriKey.Secret.SecretName, lrpdb.Spec.LRPDBPriKey.Secret.Key) + + if err != nil { + log.Error(err, "Unable to find pdb admin password ") + return err + } + + err = r.getLRPDBState(ctx, req, lrpdb) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Check LRPDB not existence completed", "LRPDB Name", lrpdb.Spec.LRPDBName) + } + + } else { + + lrpdb.Status.Phase = lrpdbPhaseFail + lrpdb.Status.Msg = "PDB " + lrpdb.Spec.LRPDBName + " already exists " + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + log.Info("Database already exists ", "LRPDB Name", lrpdb.Spec.LRPDBName) + err := fmt.Errorf("%v", 65012) + return err + } + + values := map[string]string{ + "method": "CREATE", + "pdb_name": lrpdb.Spec.LRPDBName, + "adminName": lrpdbAdminName, + "adminPwd": lrpdbAdminPwd, + "fileNameConversions": lrpdb.Spec.FileNameConversions, + "reuseTempFile": strconv.FormatBool(*(lrpdb.Spec.ReuseTempFile)), + "unlimitedStorage": strconv.FormatBool(*(lrpdb.Spec.UnlimitedStorage)), + "totalSize": lrpdb.Spec.TotalSize, + "tempSize": lrpdb.Spec.TempSize, + "getScript": strconv.FormatBool(*(lrpdb.Spec.GetScript))} + + fmt.Printf("===== PAYLOAD ===\n") + fmt.Print(" method ", values["method"], "\n") + fmt.Print(" pdb_name ", values["pdb_name"], "\n") + fmt.Print(" adminName ", values["adminName"], "\n") + fmt.Print(" adminPwd --------------\n") + fmt.Print(" fileNameConversions ", values["fileNameConversions"], "\n") + fmt.Print(" unlimitedStorage ", values["unlimitedStorage"], "\n") + fmt.Print(" reuseTempFile ", values["reuseTempFile"], "\n") + fmt.Print(" tempSize ", values["tempSize"], "\n") + fmt.Print(" totalSize ", values["totalSize"], "\n") + fmt.Print(" getScript ", values["getScript"], "\n") + + if *(lrpdb.Spec.LTDEImport) { + tde_Password, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDEPassword.Secret.SecretName, lrpdb.Spec.LTDEPassword.Secret.Key) + if err != 
nil { + return err + } + tde_Secret, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDESecret.Secret.SecretName, lrpdb.Spec.LTDESecret.Secret.Key) + if err != nil { + return err + } + + tde_Secret = tde_Secret[:len(tde_Secret)-1] + tde_Password = tde_Secret[:len(tde_Password)-1] + values["tde_Password"] = tde_Password + values["tdeKeystorePath"] = lrpdb.Spec.LTDEKeystorePath + values["tde_Secret"] = tde_Secret + } + + //url := "https://" + lrpdb.Spec.CDBResName + "-lrest:" + strconv.Itoa(lrest.Spec.LRESTPort) + "/database/pdbs/" + url := r.BaseUrl(ctx, req, lrpdb, lrest) + fmt.Print("============================================================\n") + fmt.Print(url) + fmt.Print("\n============================================================\n") + lrpdb.Status.TotalSize = lrpdb.Spec.TotalSize + lrpdb.Status.Phase = lrpdbPhaseCreate + lrpdb.Status.Msg = "Waiting for LRPDB to be created" + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + if lrpdb.Status.SqlCode != 0 { + err := fmt.Errorf("%v", lrpdb.Status.SqlCode) + return err + } + + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, + "Created", "LRPDB '%s' created successfully", lrpdb.Spec.LRPDBName) + + if lrest.Spec.DBServer != "" { + lrpdb.Status.ConnString = + lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName + } else { + log.Info("Parsing connectstring") + lrpdb.Status.ConnString = lrest.Spec.DBTnsurl + parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName)) + } + + assertiveLpdbDeletion = lrpdb.Spec.AssertiveLrpdbDeletion + if lrpdb.Spec.AssertiveLrpdbDeletion == true { + r.Recorder.Eventf(lrpdb, 
corev1.EventTypeNormal, "Created", "PDB '%s' assertive pdb deletion turned on", lrpdb.Spec.LRPDBName) + } + + r.getLRPDBState(ctx, req, lrpdb) + log.Info("Created LRPDB Resource", "LRPDB Name", lrpdb.Spec.LRPDBName) + + if bit(lrpdb.Status.Bitstat, MPINIT) == false { + r.InitConfigMap(ctx, req, lrpdb) + Cardinality, _ := r.ApplyConfigMap(ctx, req, lrpdb) + log.Info("Config Map Cardinality " + strconv.Itoa(int(Cardinality))) + } + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + return nil +} + +/* +************************************************ + - Clone a LRPDB + /*********************************************** +*/ +func (r *LRPDBReconciler) cloneLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + if lrpdb.Spec.LRPDBName == lrpdb.Spec.SrcLRPDBName { + return nil + } + + log := r.Log.WithValues("cloneLRPDB", req.NamespacedName) + + globalsqlcode = 0 + var err error + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + /* Prevent cloning an existing lrpdb */ + err = r.getLRPDBState(ctx, req, lrpdb) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Check LRPDB not existence completed", "LRPDB Name", lrpdb.Spec.LRPDBName) + } + + } else { + log.Info("Database already exists ", "LRPDB Name", lrpdb.Spec.LRPDBName) + return nil + } + + values := map[string]string{ + "method": "CLONE", + "pdb_name": lrpdb.Spec.LRPDBName, + "srcPdbName": lrpdb.Spec.SrcLRPDBName, + "reuseTempFile": strconv.FormatBool(*(lrpdb.Spec.ReuseTempFile)), + "unlimitedStorage": strconv.FormatBool(*(lrpdb.Spec.UnlimitedStorage)), + "getScript": strconv.FormatBool(*(lrpdb.Spec.GetScript))} + + //* check the existence of lrpdb.Spec.SrcLRPDBName // + var allErrs field.ErrorList + pdbCounter, _ := r.checkPDBforCloninig(ctx, req, lrpdb, lrpdb.Spec.SrcLRPDBName) + if pdbCounter == 0 { + log.Info("target pdb " + lrpdb.Spec.SrcLRPDBName 
+ " does not exists or is not open") + allErrs = append(allErrs, field.NotFound(field.NewPath("Spec").Child("LRPDBName"), " "+lrpdb.Spec.LRPDBName+" does not exist : failure")) + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + + if lrpdb.Spec.SparseClonePath != "" { + values["sparseClonePath"] = lrpdb.Spec.SparseClonePath + } + if lrpdb.Spec.FileNameConversions != "" { + values["fileNameConversions"] = lrpdb.Spec.FileNameConversions + } + if lrpdb.Spec.TotalSize != "" { + values["totalSize"] = lrpdb.Spec.TotalSize + } + if lrpdb.Spec.TempSize != "" { + values["tempSize"] = lrpdb.Spec.TempSize + } + + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdb.Spec.LRPDBName + "/" + + lrpdb.Status.Phase = lrpdbPhaseClone + lrpdb.Status.Msg = "Waiting for LRPDB to be cloned" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Status.SqlCode != 0 { + errclone := errors.New("Cannot clone database: ora-" + strconv.Itoa(lrpdb.Status.SqlCode)) + log.Info("Cannot clone database ora-" + strconv.Itoa(lrpdb.Status.SqlCode)) + lrpdb.Status.Msg = lrpdb.Spec.SrcLRPDBName + " is open in mount cannot clone " + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + return errclone + } + + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Created", "LRPDB '%s' cloned successfully", lrpdb.Spec.LRPDBName) + + if lrest.Spec.DBServer != "" { + lrpdb.Status.ConnString = lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName + } else { + lrpdb.Status.ConnString = 
lrest.Spec.DBTnsurl + parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName)) + + } + assertiveLpdbDeletion = lrpdb.Spec.AssertiveLrpdbDeletion + if lrpdb.Spec.AssertiveLrpdbDeletion == true { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Clone", "PDB '%s' assertive pdb deletion turned on", lrpdb.Spec.LRPDBName) + } + + log.Info("Cloned LRPDB successfully", "Source LRPDB Name", lrpdb.Spec.SrcLRPDBName, "Clone LRPDB Name", lrpdb.Spec.LRPDBName) + r.getLRPDBState(ctx, req, lrpdb) + return nil +} + +/* +************************************************ + - Plug a LRPDB + +*********************************************** +*/ +func (r *LRPDBReconciler) plugLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("plugLRPDB", req.NamespacedName) + globalsqlcode = 0 + + var err error + var tde_Password string + var tde_Secret string + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + values := map[string]string{ + "method": "PLUG", + "xmlFileName": lrpdb.Spec.XMLFileName, + "pdb_name": lrpdb.Spec.LRPDBName, + "sourceFileNameConversions": lrpdb.Spec.SourceFileNameConversions, + "copyAction": lrpdb.Spec.CopyAction, + "fileNameConversions": lrpdb.Spec.FileNameConversions, + "unlimitedStorage": strconv.FormatBool(*(lrpdb.Spec.UnlimitedStorage)), + "reuseTempFile": strconv.FormatBool(*(lrpdb.Spec.ReuseTempFile)), + "totalSize": lrpdb.Spec.TotalSize, + "tempSize": lrpdb.Spec.TempSize, + "getScript": strconv.FormatBool(*(lrpdb.Spec.GetScript))} + + if *(lrpdb.Spec.LTDEImport) { + tde_Password, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDEPassword.Secret.SecretName, lrpdb.Spec.LTDEPassword.Secret.Key) + if err != nil { + return err + } + tde_Secret, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDESecret.Secret.SecretName, lrpdb.Spec.LTDESecret.Secret.Key) + if err != nil { + return err + } + + tde_Secret = tde_Secret[:len(tde_Secret)-1] + tde_Password = 
tde_Secret[:len(tde_Password)-1] + values["tde_Password"] = tde_Password + values["tdeKeystorePath"] = lrpdb.Spec.LTDEKeystorePath + values["tde_Secret"] = tde_Secret + values["tdeImport"] = strconv.FormatBool(*(lrpdb.Spec.LTDEImport)) + } + if *(lrpdb.Spec.AsClone) { + values["asClone"] = strconv.FormatBool(*(lrpdb.Spec.AsClone)) + } + + url := r.BaseUrl(ctx, req, lrpdb, lrest) + + lrpdb.Status.TotalSize = lrpdb.Spec.TotalSize + lrpdb.Status.Phase = lrpdbPhasePlug + lrpdb.Status.Msg = "Waiting for LRPDB to be plugged" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Status.SqlCode != 0 { + log.Info("Plug database failure........:" + strconv.Itoa(lrpdb.Status.SqlCode)) + err = fmt.Errorf("%v", lrpdb.Status.SqlCode) + return err + } + + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Created", "LRPDB '%s' plugged successfully", lrpdb.Spec.LRPDBName) + + if lrest.Spec.DBServer != "" { + lrpdb.Status.ConnString = lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName + } else { + log.Info("Parsing connectstring") + lrpdb.Status.ConnString = lrest.Spec.DBTnsurl + parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName)) + } + + assertiveLpdbDeletion = lrpdb.Spec.AssertiveLrpdbDeletion + if lrpdb.Spec.AssertiveLrpdbDeletion == true { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Plug", "PDB '%s' assertive pdb deletion turned on", lrpdb.Spec.LRPDBName) + } + + log.Info("Successfully plugged LRPDB", "LRPDB Name", lrpdb.Spec.LRPDBName) + r.getLRPDBState(ctx, req, lrpdb) + return nil +} + +/* +************************************************ + - Unplug a 
LRPDB + +*********************************************** +*/ +func (r *LRPDBReconciler) unplugLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("unplugLRPDB", req.NamespacedName) + globalsqlcode = 0 + + var err error + var tde_Password string + var tde_Secret string + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + values := map[string]string{ + "method": "UNPLUG", + "xmlFileName": lrpdb.Spec.XMLFileName, + "getScript": strconv.FormatBool(*(lrpdb.Spec.GetScript))} + + if *(lrpdb.Spec.LTDEExport) { + // Get the TDE Password + tde_Password, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDEPassword.Secret.SecretName, lrpdb.Spec.LTDEPassword.Secret.Key) + if err != nil { + return err + } + tde_Secret, err = r.getSecret(ctx, req, lrpdb, lrpdb.Spec.LTDESecret.Secret.SecretName, lrpdb.Spec.LTDESecret.Secret.Key) + if err != nil { + return err + } + + tde_Secret = tde_Secret[:len(tde_Secret)-1] + tde_Password = tde_Secret[:len(tde_Password)-1] + values["tde_Password"] = tde_Password + values["tdeKeystorePath"] = lrpdb.Spec.LTDEKeystorePath + values["tde_Secret"] = tde_Secret + values["tdeExport"] = strconv.FormatBool(*(lrpdb.Spec.LTDEExport)) + } + + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdb.Spec.LRPDBName + "/" + + log.Info("CallAPI(url)", "url", url) + lrpdb.Status.Phase = lrpdbPhaseUnplug + lrpdb.Status.Msg = "Waiting for LRPDB to be unplugged" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + + if lrpdb.Status.SqlCode != 0 { + globalsqlcode = lrpdb.Status.SqlCode + + lrpdb.Status.Msg = lrpdb.Spec.LRPDBName + " database cannot be unplugged " + 
log.Info(lrpdb.Spec.LRPDBName + " database cannot be unplugged ") + if lrpdb.Status.SqlCode == 65170 { + log.Info(lrpdb.Spec.XMLFileName + " xml file already exists ") + } + + /* + err := r.Update(ctx, lrpdb) + if err != nil { + log.Info("Fail to update crd", "err", err.Error()) + return err + } + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status"+lrpdb.Name, "err", err.Error()) + return err + } + */ + + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Unplugged", " ORA-%s ", strconv.Itoa(lrpdb.Status.SqlCode)) + err = fmt.Errorf("%v", lrpdb.Status.SqlCode) + return err + } + + if controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + log.Info("Removing finalizer") + controllerutil.RemoveFinalizer(lrpdb, LRPDBFinalizer) + err = r.Update(ctx, lrpdb) + if err != nil { + log.Info("Could not remove finalizer", "err", err.Error()) + return err + } + lrpdb.Status.Status = true + err = r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + if err != nil { + log.Info("Could not delete LRPDB resource", "err", err.Error()) + return err + } + } + + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Unplugged", "LRPDB '%s' unplugged successfully", lrpdb.Spec.LRPDBName) + globalsqlcode = 0 + log.Info("Successfully unplugged LRPDB resource") + return nil +} + +/************************************************** +Alter system LRPDB +**************************************************/ + +/**just push the trasnsaction **/ +func (r *LRPDBReconciler) alterSystemLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("alterSystemLRPDB", req.NamespacedName) + globalsqlcode = 0 + + var err error + err = r.getLRPDBState(ctx, req, lrpdb) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Warning LRPDB does not exist", "LRPDB Name", lrpdb.Spec.LRPDBName) + return nil + } + return err + } + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + 
log.Info("Cannot find LREST server") + return err + } + + /* alter system payload */ + + values := map[string]string{ + "state": "ALTER", + "alterSystemParameter": lrpdb.Spec.AlterSystemParameter, + "alterSystemValue": lrpdb.Spec.AlterSystemValue, + "parameterScope": lrpdb.Spec.ParameterScope, + } + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + log.Info("alter system payload...:", "lrpdb.Spec.AlterSystemValue=", lrpdb.Spec.AlterSystemValue) + log.Info("alter system payload...:", "lrpdb.Spec.AlterSystemParameter=", lrpdb.Spec.AlterSystemParameter) + log.Info("alter system payload...:", "lrpdb.Spec.ParameterScope=", lrpdb.Spec.ParameterScope) + log.Info("alter system path.......:", "url=", url) + + lrpdb.Status.Phase = lrpdbPhaseAlterPlug + lrpdb.Status.ModifyOption = lrpdb.Spec.AlterSystem + " " + lrpdb.Spec.ParameterScope + lrpdb.Status.Msg = "Waiting for LRPDB parameter to be modified" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update lrpdb parameter :"+lrpdb.Name, "err", err.Error()) + return err + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Status.SqlCode == 0 { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Altered", "LRPDB(name,cmd,sqlcode) '%s %s %d' ", lrpdb.Spec.LRPDBName, lrpdb.Spec.AlterSystem, lrpdb.Status.SqlCode) + lrpdb.Status.Phase = lrpdbPhaseReady + lrpdb.Spec.Action = "Noaction" + lrpdb.Status.Action = "Noaction" + lrpdb.Status.Status = true + + if err := r.Update(ctx, lrpdb); err != nil { + log.Error(err, "Cannot rest lrpdb Spec :"+lrpdb.Name, "err", err.Error()) + return err + } + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update lrpdb parameter :"+lrpdb.Name, "err", 
err.Error()) + return err + } + return nil + + } + + if lrpdb.Status.SqlCode != 0 { + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "alter system failure", "LRPDB(name,cmd,sqlcode) '%s %s %d' ", lrpdb.Spec.LRPDBName, lrpdb.Spec.AlterSystem, lrpdb.Status.SqlCode) + erralter := errors.New("Error: cannot modify parameter") + + lrpdb.Status.ModifyOption = lrpdb.Spec.AlterSystem + " " + lrpdb.Spec.ParameterScope + lrpdb.Status.Msg = "Failed: cannot modify system parameter" + lrpdb.Status.Phase = lrpdbPhaseStatus + lrpdb.Spec.AlterSystem = "" + lrpdb.Spec.ParameterScope = "" + lrpdb.Spec.Action = "Noaction" + if err := r.Update(ctx, lrpdb); err != nil { + log.Error(err, "Cannot rest lrpdb Spec :"+lrpdb.Name, "err", err.Error()) + return err + } + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update lrpdb parameter :"+lrpdb.Name, "err", err.Error()) + return err + } + return erralter + } + + lrpdb.Status.Status = false + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update lrpdb parameter :"+lrpdb.Name, "err", err.Error()) + return err + } + return nil +} + +/************************************************* + * Modify a LRPDB state + ***********************************************/ +func (r *LRPDBReconciler) modifyLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("modifyLRPDB", req.NamespacedName) + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Modify", "Info:'%s %s %s' ", lrpdb.Spec.LRPDBName, lrpdb.Spec.LRPDBState, lrpdb.Status.ModifyOption) + + var err error + err = r.getLRPDBState(ctx, req, lrpdb) + if err != nil { + if lrpdb.Status.SqlCode == 1403 { + // BUG 36752465 + // We have to handle to verify a non existings results using both + log.Info("Database does not exists ") + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + if apierrors.IsNotFound(err) { + log.Info("Warning LRPDB does not exist", 
"LRPDB Name", lrpdb.Spec.LRPDBName) + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + return err + } + + /* This scenario is managed by webhook acceptance test ... leave it here anyway */ + if lrpdb.Status.OpenMode == "READ WRITE" && lrpdb.Spec.LRPDBState == "OPEN" && lrpdb.Spec.ModifyOption == "READ WRITE" { + /* Database is already open no action required */ + return nil + } + + if lrpdb.Status.OpenMode == "MOUNTED" && lrpdb.Spec.LRPDBState == "CLOSE" && lrpdb.Spec.ModifyOption == "IMMEDIATE" { + /* Database is already close no action required */ + return nil + } + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + values := map[string]string{} + if lrpdb.Spec.LRPDBState == "OPEN" || lrpdb.Spec.LRPDBState == "CLOSE" { + values = map[string]string{ + "state": lrpdb.Spec.LRPDBState, + "modifyOption": lrpdb.Spec.ModifyOption, + "getScript": strconv.FormatBool(*(lrpdb.Spec.GetScript))} + if lrpdb.Spec.LRPDBState == "OPEN" || lrpdb.Spec.LRPDBState == "CLOSE" { + log.Info("MODIFY LRPDB", "lrpdb.Spec.LRPDBState=", lrpdb.Spec.LRPDBState, "lrpdb.Spec.ModifyOption=", lrpdb.Spec.ModifyOption) + log.Info("LRPDB STATUS OPENMODE", "lrpdb.Status.OpenMode=", lrpdb.Status.OpenMode) + } + } + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + "/status/" + + lrpdb.Status.Phase = lrpdbPhaseModify + if lrpdb.Spec.LRPDBState == "OPEN" || lrpdb.Spec.LRPDBState == "CLOSE" { + lrpdb.Status.ModifyOption = lrpdb.Spec.LRPDBState + "-" + lrpdb.Spec.ModifyOption + } + + lrpdb.Status.Msg = "Waiting for LRPDB to be modified" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "POST") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, 
&(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Spec.LRPDBState == "OPEN" || lrpdb.Spec.LRPDBState == "CLOSE" { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Modified", " '%s' modified successfully '%s'", lrpdb.Spec.LRPDBName, lrpdb.Spec.LRPDBState) + } + + if lrest.Spec.DBServer != "" { + lrpdb.Status.ConnString = lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName + } else { + lrpdb.Status.ConnString = lrest.Spec.DBTnsurl + parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName)) + + } + + lrpdb.Status.Msg = "alter lrpdb completed" + lrpdb.Status.Status = false + lrpdb.Status.Phase = lrpdbPhaseReady + + log.Info("Successfully modified LRPDB state", "LRPDB Name", lrpdb.Spec.LRPDBName) + + /* After database openining we reapply the config map if warning is present */ + if lrpdb.Spec.LRPDBState == "OPEN" { + if bit(lrpdb.Status.Bitstat, MPWARN|MPINIT) { + log.Info("re-apply config map") + r.ApplyConfigMap(ctx, req, lrpdb) + + } + } + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + //r.getLRPDBState(ctx, req, lrpdb) + return nil +} + +/* +************************************************ + - Get LRPDB State + /*********************************************** +*/ +func (r *LRPDBReconciler) getLRPDBState(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("getLRPDBState", req.NamespacedName) + + var err error + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + "/status/" + + lrpdb.Status.Msg = "Getting LRPDB state" + fmt.Print("============================\n") + fmt.Println(lrpdb.Status) + fmt.Print("============================\n") + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update 
status for :"+lrpdb.Name, "err", err.Error()) + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, nil, "GET") + if err != nil { + log.Info("Begin respData") + log.Info(respData) + log.Info("End respData") + lrpdb.Status.Msg = "getLRPDBState failure : check lrpdb status" + lrpdb.Status.Status = false + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + if lrpdb.Status.SqlCode == 1403 { + lrpdb.Status.OpenMode = "unknown" + lrpdb.Status.Msg = "check lrpdb status" + lrpdb.Status.Status = false + return errors.New("NO_DATA_FOUND") + } + + var objmap map[string]interface{} + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "Failed to get state of LRPDB :"+lrpdbName, "err", err.Error()) + } + lrpdb.Status.OpenMode = objmap["open_mode"].(string) + + /* if lrpdb.Status.Phase == lrpdbPhaseCreate && sqlcode == 1403 { + + if lrpdb.Status.OpenMode == "READ WRITE" { + err := r.mapLRPDB(ctx, req, lrpdb) + if err != nil { + log.Info("Fail to Map resource getting LRPDB state") + } + } + + if lrpdb.Status.OpenMode == "MOUNTED" { + err := r.mapLRPDB(ctx, req, lrpdb) + if err != nil { + log.Info("Fail to Map resource getting LRPDB state") + } + } + }*/ + + lrpdb.Status.Msg = "check lrpdb ok" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + log.Info("Successfully obtained LRPDB state", "LRPDB Name", lrpdb.Spec.LRPDBName, "State", objmap["open_mode"].(string)) + return nil +} + +/* +************************************************ + - Map Database LRPDB to Kubernetes LRPDB CR + +/*********************************************** +*/ +func (r *LRPDBReconciler) mapLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("mapLRPDB", req.NamespacedName) + + var err error + + lrest, 
err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + log.Info("callapi get to map lrpdb") + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + log.Info("DEBUG NEW URL " + url) + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, nil, "GET") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + var objmap map[string]interface{} + if err := json.Unmarshal([]byte(respData), &objmap); err != nil { + log.Error(err, "Failed json.Unmarshal :"+lrpdbName, "err", err.Error()) + } + + //fmt.Printf("%+v\n", objmap) + totSizeInBytes := objmap["total_size"].(float64) + totSizeInGB := totSizeInBytes / 1024 / 1024 / 1024 + + lrpdb.Status.OpenMode = objmap["open_mode"].(string) + lrpdb.Status.TotalSize = fmt.Sprintf("%4.2f", totSizeInGB) + "G" + assertiveLpdbDeletion = lrpdb.Spec.AssertiveLrpdbDeletion + if lrpdb.Spec.AssertiveLrpdbDeletion == true { + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Map", "PDB '%s' assertive pdb deletion turned on", lrpdb.Spec.LRPDBName) + } + + if lrest.Spec.DBServer != "" { + lrpdb.Status.ConnString = lrest.Spec.DBServer + ":" + strconv.Itoa(lrest.Spec.DBPort) + "/" + lrpdb.Spec.LRPDBName + } else { + lrpdb.Status.ConnString = lrest.Spec.DBTnsurl + parseTnsAlias(&(lrpdb.Status.ConnString), &(lrpdb.Spec.LRPDBName)) + } + + lrpdb.Status.Phase = lrpdbPhaseReady + + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + log.Info("Successfully mapped LRPDB to Kubernetes resource", "LRPDB Name", lrpdb.Spec.LRPDBName) + lrpdb.Status.Status = true + return nil +} + +/* +************************************************ + - Delete a LRPDB + /*********************************************** +*/ +func (r *LRPDBReconciler) deleteLRPDB(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("deleteLRPDB", 
req.NamespacedName) + + errstate := r.getLRPDBState(ctx, req, lrpdb) + if errstate != nil { + if lrpdb.Status.SqlCode == 1403 { + // BUG 36752336: + log.Info("Database does not exists ") + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + if apierrors.IsNotFound(errstate) { + log.Info("Warning LRPDB does not exist", "LRPDB Name", lrpdb.Spec.LRPDBName) + r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + return nil + } + log.Error(errstate, "Failed to update status for :"+lrpdb.Name, "err", errstate.Error()) + return errstate + //* if the pdb does not exists delete the crd *// + + } + + if lrpdb.Status.OpenMode == "READ WRITE" { + + errdel := errors.New("pdb is open cannot delete it") + log.Info("LRPDB is open in read write cannot drop ") + lrpdb.Status.Msg = "LRPDB is open in read write cannot drop " + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + return errdel + } + + err := r.deleteLRPDBInstance(req, ctx, lrpdb) + if err != nil { + log.Info("Could not delete LRPDB", "LRPDB Name", lrpdb.Spec.LRPDBName, "err", err.Error()) + return err + } + + if controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + log.Info("Removing finalizer") + controllerutil.RemoveFinalizer(lrpdb, LRPDBFinalizer) + err := r.Update(ctx, lrpdb) + if err != nil { + log.Info("Could not remove finalizer", "err", err.Error()) + return err + } + lrpdb.Status.Status = true + err = r.Delete(context.Background(), lrpdb, client.GracePeriodSeconds(1)) + if err != nil { + log.Info("Could not delete LRPDB resource", "err", err.Error()) + return err + } + } + + r.Recorder.Eventf(lrpdb, corev1.EventTypeNormal, "Deleted", "LRPDB '%s' dropped successfully", lrpdb.Spec.LRPDBName) + + log.Info("Successfully deleted LRPDB resource") + return nil +} + +/* +************************************************ + - Check LRPDB deletion + 
/*********************************************** +*/ +func (r *LRPDBReconciler) manageLRPDBDeletion(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + log := r.Log.WithValues("manageLRPDBDeletion", req.NamespacedName) + + // Check if the LRPDB instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. + isLRPDBMarkedToBeDeleted := lrpdb.GetDeletionTimestamp() != nil + if isLRPDBMarkedToBeDeleted { + log.Info("Marked to be deleted") + lrpdb.Status.Phase = lrpdbPhaseDelete + lrpdb.Status.Status = true + r.Status().Update(ctx, lrpdb) + + if controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + // Remove LRPDBFinalizer. Once all finalizers have been + // removed, the object will be deleted. + log.Info("Removing finalizer") + controllerutil.RemoveFinalizer(lrpdb, LRPDBFinalizer) + err := r.Update(ctx, lrpdb) + if err != nil { + log.Info("Could not remove finalizer", "err", err.Error()) + return err + } + log.Info("Successfully removed LRPDB resource") + return nil + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + log.Info("Adding finalizer") + controllerutil.AddFinalizer(lrpdb, LRPDBFinalizer) + err := r.Update(ctx, lrpdb) + if err != nil { + log.Info("Could not add finalizer", "err", err.Error()) + return err + } + lrpdb.Status.Status = false + } + return nil +} + +/* +************************************************ + - Finalization logic for LRPDBFinalizer + +*********************************************** +*/ +func (r *LRPDBReconciler) deleteLRPDBInstance(req ctrl.Request, ctx context.Context, lrpdb *dbapi.LRPDB) error { + + log := r.Log.WithValues("deleteLRPDBInstance", req.NamespacedName) + + var err error + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + return err + } + + values := map[string]string{ + "action": "KEEP", + "getScript": strconv.FormatBool(*(lrpdb.Spec.GetScript))} + + if lrpdb.Spec.DropAction != "" { + 
values["action"] = lrpdb.Spec.DropAction + } + + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + "/" + + lrpdb.Status.Phase = lrpdbPhaseDelete + lrpdb.Status.Msg = "Waiting for LRPDB to be deleted" + if err := r.Status().Update(ctx, lrpdb); err != nil { + log.Error(err, "Failed to update status for :"+lrpdb.Name, "err", err.Error()) + } + + respData, err := NewCallLAPI(r, ctx, req, lrpdb, url, values, "DELETE") + if err != nil { + log.Error(err, "Failure NewCallLAPI( "+url+")", "err", err.Error()) + return err + } + + r.GetSqlCode(respData, &(lrpdb.Status.SqlCode)) + globalsqlcode = lrpdb.Status.SqlCode + + log.Info("Successfully dropped LRPDB", "LRPDB Name", lrpdbName) + return nil +} + +/* +*********************************************************** + - SetupWithManager sets up the controller with the Manager + +************************************************************ +*/ +func (r *LRPDBReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.LRPDB{}). + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Ignore updates to CR status in which case metadata.Generation does not change + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + //return !e.DeleteStateUnknown + return false + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 100}). 
+ Complete(r) +} + +/************************************************************* +Enh 35357707 - PROVIDE THE LRPDB TNSALIAS INFORMATION +**************************************************************/ + +func parseTnsAlias(tns *string, lrpdbsrv *string) { + fmt.Printf("Analyzing string [%s]\n", *tns) + fmt.Printf("Relacing srv [%s]\n", *lrpdbsrv) + var swaptns string + + if strings.Contains(strings.ToUpper(*tns), "SERVICE_NAME") == false { + fmt.Print("Cannot generate tns alias for lrpdb") + return + } + + if strings.Contains(strings.ToUpper(*tns), "ORACLE_SID") == true { + fmt.Print("Cannot generate tns alias for lrpdb") + return + } + + swaptns = fmt.Sprintf("SERVICE_NAME=%s", *lrpdbsrv) + tnsreg := regexp.MustCompile(`SERVICE_NAME=\w+`) + *tns = tnsreg.ReplaceAllString(*tns, swaptns) + + fmt.Printf("Newstring [%s]\n", *tns) + +} + +// Compose url +func (r *LRPDBReconciler) BaseUrl(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, lrest dbapi.LREST) string { + log := r.Log.WithValues("BaseUrl", req.NamespacedName) + baseurl := "https://" + lrpdb.Spec.CDBResName + "-lrest." 
+ lrpdb.Spec.CDBNamespace + ":" + strconv.Itoa(lrest.Spec.LRESTPort) + "/database/pdbs/" + log.Info("Baseurl:" + baseurl) + return baseurl +} + +func (r *LRPDBReconciler) DecryptWithPrivKey(Key string, Buffer string, req ctrl.Request) (string, error) { + log := r.Log.WithValues("DecryptWithPrivKey", req.NamespacedName) + Debug := 0 + block, _ := pem.Decode([]byte(Key)) + pkcs8PrivateKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + log.Error(err, "Failed to parse private key - "+err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("======================================\n") + fmt.Printf("%s\n", Key) + fmt.Printf("======================================\n") + } + + encString64, err := base64.StdEncoding.DecodeString(string(Buffer)) + if err != nil { + log.Error(err, "Failed to decode encrypted string to base64 - "+err.Error()) + return "", err + } + + decryptedB, err := rsa.DecryptPKCS1v15(nil, pkcs8PrivateKey.(*rsa.PrivateKey), encString64) + if err != nil { + log.Error(err, "Failed to decrypt string - "+err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("[%s]\n", string(decryptedB)) + } + return strings.TrimSpace(string(decryptedB)), err + +} + +// New function to decrypt credential using private key +func (r *LRPDBReconciler) getEncriptedSecret(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, secretName string, keyName string, secretNamePk string, keyNamePk string) (string, error) { + + log := r.Log.WithValues("getEncriptedSecret", req.NamespacedName) + + log.Info("getEncriptedSecret :" + secretName) + secret1 := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: lrpdb.Namespace}, secret1) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + secretName) + lrpdb.Status.Msg = "Secret not found:" + secretName + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + secret2 := &corev1.Secret{} + err = r.Get(ctx, 
types.NamespacedName{Name: secretNamePk, Namespace: lrpdb.Namespace}, secret2) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + secretNamePk) + lrpdb.Status.Msg = "Secret not found:" + secretNamePk + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + Encval := string(secret1.Data[keyName]) + Encval = strings.TrimSpace(Encval) + + privKey := string(secret2.Data[keyNamePk]) + privKey = strings.TrimSpace(privKey) + + /* Debuug info for dev phase + fmt.Printf("DEBUG Secretename:secretName :%s\n", secretName) + fmt.Printf("DEBUG privKey :%s\n", privKey) + fmt.Printf("DEBUG Encval :%s\n", Encval) + */ + + DecVal, err := r.DecryptWithPrivKey(privKey, Encval, req) + if err != nil { + log.Error(err, "Fail to decrypt secret:"+secretName) + lrpdb.Status.Msg = " Fail to decrypt secret:" + secretName + return "", err + } + return DecVal, nil +} + +func (r *LRPDBReconciler) manageLRPDBDeletion2(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + log := r.Log.WithValues("manageLRPDBDeletion", req.NamespacedName) + if lrpdb.ObjectMeta.DeletionTimestamp.IsZero() { + if !controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + controllerutil.AddFinalizer(lrpdb, LRPDBFinalizer) + if err := r.Update(ctx, lrpdb); err != nil { + return err + } + } + } else { + log.Info("Pdb marked to be delted") + if controllerutil.ContainsFinalizer(lrpdb, LRPDBFinalizer) { + if assertiveLpdbDeletion == true { + log.Info("Deleting lrpdb CRD: Assertive approach is turned on ") + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + log.Error(err, "Cannont find cdb resource ", "err", err.Error()) + return err + } + + lrpdbName := lrpdb.Spec.LRPDBName + if lrpdb.Status.OpenMode == "READ WRITE" { + valuesclose := map[string]string{ + "state": "CLOSE", + "modifyOption": "IMMEDIATE", + "getScript": "FALSE"} + lrpdbName := lrpdb.Spec.LRPDBName + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + 
"/status/" + _, errclose := r.callAPI(ctx, req, lrpdb, url, valuesclose, "POST") + if errclose != nil { + log.Info("Warning error closing lrpdb continue anyway") + } + } + + valuesdrop := map[string]string{ + "action": "INCLUDING", + "getScript": "FALSE"} + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdbName + "/" + + log.Info("Call Delete()") + _, errdelete := r.callAPI(ctx, req, lrpdb, url, valuesdrop, "DELETE") + if errdelete != nil { + log.Error(errdelete, "Fail to delete lrpdb :"+lrpdb.Name, "err", err.Error()) + return errdelete + } + } /* END OF ASSERTIVE SECTION */ + + log.Info("Marked to be deleted") + lrpdb.Status.Phase = lrpdbPhaseDelete + lrpdb.Status.Status = true + r.Status().Update(ctx, lrpdb) + + controllerutil.RemoveFinalizer(lrpdb, LRPDBFinalizer) + if err := r.Update(ctx, lrpdb); err != nil { + log.Info("Cannot remove finalizer") + return err + } + + } + + return nil + } + + return nil +} + +func (r *LRPDBReconciler) InitConfigMap(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) *corev1.ConfigMap { + log := r.Log.WithValues("InitConfigMap", req.NamespacedName) + log.Info("ConfigMap..............:" + "ConfigMap" + lrpdb.Name) + log.Info("ConfigMap nmsp.........:" + lrpdb.Namespace) + /* + * PDB SYSTEM PARAMETER + * record [name,value=[paramete_val|reset],level=[session|system]] + */ + + if lrpdb.Spec.PDBConfigMap == "" { + /* if users does not specify a config map + we generate an empty new one for possible + future pdb parameter modification */ + + var SystemParameters map[string]string + + log.Info("Generating an empty configmap") + globalconfigmap = "configmap-" + lrpdb.Spec.LRPDBName + "-default" + DbParameters := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "configmap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: globalconfigmap, + Namespace: lrpdb.Namespace, + }, + Data: SystemParameters, + } + + if err := ctrl.SetControllerReference(lrpdb, DbParameters, r.Scheme); err != nil { + log.Error(err, 
"Fail to set SetControllerReference", "err", err.Error()) + return nil + } + + /* Update Spec.PDBConfigMap */ + lrpdb.Spec.PDBConfigMap = "configmap" + lrpdb.Spec.LRPDBName + "default" + if err := r.Update(ctx, lrpdb); err != nil { + log.Error(err, "Failure updating Spec.PDBConfigMap ", "err", err.Error()) + return nil + } + lrpdb.Status.Bitstat = bis(lrpdb.Status.Bitstat, MPEMPT) + return DbParameters + + } else { + + lrpdb.Status.Bitstat = bis(lrpdb.Status.Bitstat, MPINIT) + globalconfigmap = lrpdb.Spec.PDBConfigMap + DbParameters, err := r.GetConfigMap(ctx, req, lrpdb) + if err != nil { + log.Error(err, "Fail to fetch configmap ", "err", err.Error()) + return nil + } + + //ParseConfigMapData(DbParameters) + + return DbParameters + } + + return nil +} + +func (r *LRPDBReconciler) GetConfigMap(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) (*corev1.ConfigMap, error) { + log := r.Log.WithValues("GetConfigMap", req.NamespacedName) + log.Info("ConfigMapGlobal.............:" + globalconfigmap) + DbParameters, err := k8s.FetchConfigMap(r.Client, lrpdb.Namespace, globalconfigmap) + if err != nil { + log.Error(err, "Fail to fetch configmap", "err", err.Error()) + return nil, err + } + + return DbParameters, nil +} + +func (r *LRPDBReconciler) ApplyConfigMap(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) (int32, error) { + log := r.Log.WithValues("ApplyConfigMap", req.NamespacedName) + /* We read the config map and apply the setting to the pdb */ + + log.Info("Starting Apply Config Map Process") + configmap, err := r.GetConfigMap(ctx, req, lrpdb) + if err != nil { + log.Info("Cannot get config map in the open yaml file") + return 0, nil + } + Cardinality := int32(len(configmap.Data)) + if Cardinality == 0 { + log.Info("Empty config map... 
nothing to do ") + return 0, nil + } + log.Info("GetConfigMap completed") + + lrest, err := r.getLRESTResource(ctx, req, lrpdb) + if err != nil { + log.Info("Cannot find lrest server") + return 0, nil + } + tokens := lrcommons.ParseConfigMapData(configmap) + for cnt := range tokens { + if len(tokens[cnt]) != 0 { + /* avoid null token and check malformed value */ + fmt.Printf("token=[%s]\n", tokens[cnt]) + Parameter := strings.Split(tokens[cnt], " ") + if len(Parameter) != 3 { + log.Info("WARNING malformed value in the configmap") + } else { + fmt.Printf("alter system set %s=%s scope=%s instances=all\n", Parameter[0], Parameter[1], Parameter[2]) + /* Preparing PayLoad + ----------------- + WARNING: event setting is not yet supported. It will be implemented in future release + */ + AlterSystemPayload := map[string]string{ + "state": "ALTER", + "alterSystemParameter": Parameter[0], + "alterSystemValue": Parameter[1], + "parameterScope": Parameter[2], + } + url := r.BaseUrl(ctx, req, lrpdb, lrest) + lrpdb.Spec.LRPDBName + respData, err := r.callAPI(ctx, req, lrpdb, url, AlterSystemPayload, "POST") + if err != nil { + log.Error(err, "callAPI failure durring Apply Config Map", "err", err.Error()) + return 0, err + } + /* check sql code execution */ + var retJson map[string]interface{} + if err := json.Unmarshal([]byte(respData), &retJson); err != nil { + log.Error(err, "failed to get Data from callAPI", "err", err.Error()) + return 0, err + } + /* We do not the execution if something goes wrong for a single parameter + just report the error in the event queue */ + SqlCode := strconv.Itoa(int(retJson["sqlcode"].(float64))) + AlterMsg := fmt.Sprintf("pdb=%s:%s:%s:%s:%s", lrpdb.Spec.LRPDBName, Parameter[0], Parameter[1], Parameter[2], SqlCode) + log.Info("Config Map Apply:......." 
+ AlterMsg) + + if SqlCode != "0" { + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", AlterMsg) + lrpdb.Status.Bitstat = bis(lrpdb.Status.Bitstat, MPWARN) + } + + } + } + + } + + lrpdb.Status.Bitstat = bis(lrpdb.Status.Bitstat, MPAPPL) + + return Cardinality, nil +} + +func (r *LRPDBReconciler) ManageConfigMapForCloningAndPlugin(ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB) error { + log := r.Log.WithValues("ManageConfigMapForCloningAndPlugin", req.NamespacedName) + log.Info("Frame:") + /* + If configmap parameter is set and init flag is not set + then we need to iniialized the init mask. This is the case for + pdb generated by clone and plug action + */ + if lrpdb.Spec.Action != "CREATE" && lrpdb.Spec.PDBConfigMap != "" && bit(lrpdb.Status.Bitstat, MPINIT) == false { + if r.InitConfigMap(ctx, req, lrpdb) == nil { + log.Info("Cannot initialize config map for pdb.........:" + lrpdb.Spec.LRPDBName) + return nil + } + log.Info("Call...........:ApplyConfigMap(ctx, req, lrpdb)") + Cardinality, _ := r.ApplyConfigMap(ctx, req, lrpdb) + log.Info("Cardnality:....:" + strconv.Itoa(int(Cardinality))) + if Cardinality == 0 { + return nil + } + + } + return nil +} + +func NewCallLAPI(intr interface{}, ctx context.Context, req ctrl.Request, lrpdb *dbapi.LRPDB, url string, payload map[string]string, action string) (string, error) { + var c client.Client + var r logr.Logger + var e record.EventRecorder + var err error + + recpdb, ok1 := intr.(*LRPDBReconciler) + if ok1 { + fmt.Printf("func NewCallLApi ((*PDBReconciler),......)\n") + c = recpdb.Client + e = recpdb.Recorder + r = recpdb.Log + } + + reccdb, ok2 := intr.(*LRESTReconciler) + if ok2 { + fmt.Printf("func NewCallLApi ((*CDBReconciler),......)\n") + c = reccdb.Client + e = reccdb.Recorder + r = reccdb.Log + } + + log := r.WithValues("NewCallLAPI", req.NamespacedName) + + secret := &corev1.Secret{} + + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsKey.Secret.SecretName, 
Namespace: lrpdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsKey.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + rsaKeyPEM := secret.Data[lrpdb.Spec.LRPDBTlsKey.Secret.Key] + + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsCrt.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsCrt.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + rsaCertPEM := secret.Data[lrpdb.Spec.LRPDBTlsCrt.Secret.Key] + + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBTlsCat.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBTlsCat.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + caCert := secret.Data[lrpdb.Spec.LRPDBTlsCat.Secret.Key] + /* + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(rsaKeyPEM)) + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(rsaCertPEM)) + r.Recorder.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTINFO", string(caCert)) + */ + + certificate, err := tls.X509KeyPair([]byte(rsaCertPEM), []byte(rsaKeyPEM)) + if err != nil { + lrpdb.Status.Msg = "Error tls.X509KeyPair" + return "", err + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + /* + tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate}, + RootCAs: caCertPool} + */ + tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate}, + RootCAs: caCertPool, + //MinVersion: tls.VersionTLS12, + CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ 
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + } + + tr := &http.Transport{TLSClientConfig: tlsConf} + + httpclient := &http.Client{Transport: tr} + + log.Info("Issuing REST call", "URL", url, "Action", action) + + // Get Web Server User + //secret := &corev1.Secret{} + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.WebLrpdbServerUser.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.WebLrpdbServerUser.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + webUserEnc := string(secret.Data[lrpdb.Spec.WebLrpdbServerUser.Secret.Key]) + webUserEnc = strings.TrimSpace(webUserEnc) + + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.LRPDBPriKey.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.LRPDBPriKey.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + privKey := string(secret.Data[lrpdb.Spec.LRPDBPriKey.Secret.Key]) + webUser, err := lrcommons.CommonDecryptWithPrivKey(privKey, webUserEnc, req) + + // Get Web Server User Password + secret = &corev1.Secret{} + err = c.Get(ctx, types.NamespacedName{Name: lrpdb.Spec.WebLrpdbServerPwd.Secret.SecretName, Namespace: lrpdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + lrpdb.Spec.WebLrpdbServerPwd.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + webUserPwdEnc := string(secret.Data[lrpdb.Spec.WebLrpdbServerPwd.Secret.Key]) + webUserPwdEnc = strings.TrimSpace(webUserPwdEnc) + webUserPwd, err := lrcommons.CommonDecryptWithPrivKey(privKey, webUserPwdEnc, req) + + var 
httpreq *http.Request + if action == "GET" { + httpreq, err = http.NewRequest(action, url, nil) + } else { + jsonValue, _ := json.Marshal(payload) + httpreq, err = http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + } + + if err != nil { + log.Info("Unable to create HTTP Request for LRPDB : "+lrpdb.Name, "err", err.Error()) + return "", err + } + + httpreq.Header.Add("Accept", "application/json") + httpreq.Header.Add("Content-Type", "application/json") + httpreq.SetBasicAuth(webUser, webUserPwd) + + resp, err := httpclient.Do(httpreq) + if err != nil { + errmsg := err.Error() + log.Error(err, "Failed - Could not connect to LREST Pod", "err", err.Error()) + lrpdb.Status.Msg = "Error: Could not connect to LREST Pod" + e.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTError", errmsg) + return "", err + } + + e.Eventf(lrpdb, corev1.EventTypeWarning, "Done", lrpdb.Spec.CDBResName) + if resp.StatusCode != http.StatusOK { + bb, _ := ioutil.ReadAll(resp.Body) + + if resp.StatusCode == 404 { + lrpdb.Status.ConnString = "" + lrpdb.Status.Msg = lrpdb.Spec.LRPDBName + " not found" + + } else { + if flood_control == false { + lrpdb.Status.Msg = "LREST Error - HTTP Status Code:" + strconv.Itoa(resp.StatusCode) + } + } + + if flood_control == false { + log.Info("LREST Error - HTTP Status Code :"+strconv.Itoa(resp.StatusCode), "Err", string(bb)) + } + + var apiErr LRESTError + json.Unmarshal([]byte(bb), &apiErr) + if flood_control == false { + e.Eventf(lrpdb, corev1.EventTypeWarning, "LRESTError", "Failed: %s", apiErr.Message) + } + fmt.Printf("\n================== APIERR ======================\n") + fmt.Printf("%+v \n", apiErr) + fmt.Printf(string(bb)) + fmt.Printf("URL=%s\n", url) + fmt.Printf("resp.StatusCode=%s\n", strconv.Itoa(resp.StatusCode)) + fmt.Printf("\n================== APIERR ======================\n") + flood_control = true + return "", errors.New("LREST Error") + } + flood_control = false + + defer resp.Body.Close() + + bodyBytes, err := 
ioutil.ReadAll(resp.Body) + if err != nil { + fmt.Print(err.Error()) + } + respData := string(bodyBytes) + fmt.Print("CALL API return msg.....:") + fmt.Println(string(bodyBytes)) + + var apiResponse restSQLCollection + json.Unmarshal([]byte(bodyBytes), &apiResponse) + fmt.Printf("===> %#v\n", apiResponse) + fmt.Printf("===> %+v\n", apiResponse) + + errFound := false + for _, sqlItem := range apiResponse.Items { + if sqlItem.ErrorDetails != "" { + log.Info("LREST Error - Oracle Error Code :" + strconv.Itoa(sqlItem.ErrorCode)) + if !errFound { + lrpdb.Status.Msg = sqlItem.ErrorDetails + } + e.Eventf(lrpdb, corev1.EventTypeWarning, "OraError", "%s", sqlItem.ErrorDetails) + errFound = true + } + } + + if errFound { + return "", errors.New("Oracle Error") + } + + return respData, nil +} + +func (r *LRPDBReconciler) GetSqlCode(rsp string, sqlcode *int) error { + log := r.Log.WithValues("GetSqlCode", "callAPI(...)") + + var objmap map[string]interface{} + if err := json.Unmarshal([]byte(rsp), &objmap); err != nil { + log.Error(err, "failed to get respData from callAPI", "err", err.Error()) + return err + } + + *sqlcode = int(objmap["sqlcode"].(float64)) + log.Info("sqlcode.......:ora-" + strconv.Itoa(*sqlcode)) + if *sqlcode != 0 { + switch strconv.Itoa(*sqlcode) { + case "65019": /* already open */ + return nil + case "65020": /* already closed */ + return nil + } + err := fmt.Errorf("%v", sqlcode) + return err + } + return nil +} diff --git a/controllers/database/oraclerestdataservice_controller.go b/controllers/database/oraclerestdataservice_controller.go index 783ae70c..053f4a19 100644 --- a/controllers/database/oraclerestdataservice_controller.go +++ b/controllers/database/oraclerestdataservice_controller.go @@ -60,7 +60,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" - dbapi "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbapi 
"github.com/oracle/oracle-database-operator/apis/database/v4" dbcommons "github.com/oracle/oracle-database-operator/commons/database" "github.com/go-logr/logr" @@ -210,6 +210,13 @@ func (r *OracleRestDataServiceReconciler) Reconcile(ctx context.Context, req ctr return result, nil } + // Configure MongoDB + result = r.enableMongoDB(oracleRestDataService, singleInstanceDatabase, sidbReadyPod, ordsReadyPod, ctx, req) + if result.Requeue { + r.Log.Info("Reconcile queued") + return result, nil + } + // Delete Secrets r.deleteSecrets(oracleRestDataService, ctx, req) @@ -263,37 +270,6 @@ func (r *OracleRestDataServiceReconciler) validate(m *dbapi.OracleRestDataServic eventMsgs = append(eventMsgs, "image patching is not available currently") } - // Validate the apex ADMIN password if it is specified - - if !m.Status.ApexConfigured && m.Spec.ApexPassword.SecretName != "" { - apexPasswordSecret := &corev1.Secret{} - err = r.Get(ctx, types.NamespacedName{Name: m.Spec.ApexPassword.SecretName, Namespace: m.Namespace}, apexPasswordSecret) - if err != nil { - if apierrors.IsNotFound(err) { - m.Status.Status = dbcommons.StatusError - eventReason := "Apex Password" - eventMsg := "password secret " + m.Spec.ApexPassword.SecretName + " not found, retrying..." 
- r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) - r.Log.Info(eventMsg) - return requeueY, nil - } - r.Log.Error(err, err.Error()) - return requeueY, err - } - // APEX_LISTENER , APEX_REST_PUBLIC_USER , APEX_PUBLIC_USER passwords - apexPassword := string(apexPasswordSecret.Data[m.Spec.ApexPassword.SecretKey]) - - // Validate apexPassword - if !dbcommons.ApexPasswordValidator(apexPassword) { - m.Status.Status = dbcommons.StatusError - eventReason := "Apex Password" - eventMsg := "password for Apex is invalid, it should contain at least 6 chars, at least one numeric character, at least one punctuation character (!\"#$%&()``*+,-/:;?_), at least one upper-case alphabet" - r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) - r.Log.Info("APEX password does not conform to the requirements") - return requeueY, nil - } - } - if len(eventMsgs) > 0 { m.Status.Status = dbcommons.StatusError r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, strings.Join(eventMsgs, ",")) @@ -406,7 +382,7 @@ func (r *OracleRestDataServiceReconciler) checkHealthStatus(m *dbapi.OracleRestD return requeueY, readyPod } if readyPod.Name == "" { - m.Status.Status = dbcommons.StatusNotReady + m.Status.Status = dbcommons.StatusPending return requeueY, readyPod } @@ -425,7 +401,7 @@ func (r *OracleRestDataServiceReconciler) checkHealthStatus(m *dbapi.OracleRestD } } - m.Status.Status = dbcommons.StatusNotReady + m.Status.Status = dbcommons.StatusUpdating if strings.Contains(out, "HTTP/1.1 200 OK") || strings.Contains(strings.ToUpper(err.Error()), "HTTP/1.1 200 OK") { if n.Status.Status == dbcommons.StatusReady || n.Status.Status == dbcommons.StatusUpdating || n.Status.Status == dbcommons.StatusPatching { m.Status.Status = dbcommons.StatusReady @@ -447,7 +423,7 @@ func (r *OracleRestDataServiceReconciler) checkHealthStatus(m *dbapi.OracleRestD } } } - if m.Status.Status == dbcommons.StatusNotReady { + if m.Status.Status == dbcommons.StatusUpdating { return 
requeueY, readyPod } return requeueN, readyPod @@ -480,13 +456,24 @@ func (r *OracleRestDataServiceReconciler) instantiateSVCSpec(m *dbapi.OracleRest }(), }, Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: "client", - Port: 8443, - Protocol: corev1.ProtocolTCP, - }, - }, + Ports: func() []corev1.ServicePort { + ports := []corev1.ServicePort{ + { + Name: "client", + Port: 8181, + Protocol: corev1.ProtocolTCP, + }, + } + // Conditionally add MongoDB port if enabled + if m.Spec.MongoDbApi { + ports = append(ports, corev1.ServicePort{ + Name: "mongodb", + Port: 27017, + Protocol: corev1.ProtocolTCP, + }) + } + return ports + }(), Selector: map[string]string{ "app": m.Name, }, @@ -509,24 +496,7 @@ func (r *OracleRestDataServiceReconciler) instantiateSVCSpec(m *dbapi.OracleRest // // ############################################################################# func (r *OracleRestDataServiceReconciler) instantiatePodSpec(m *dbapi.OracleRestDataService, - n *dbapi.SingleInstanceDatabase) (*corev1.Pod, *corev1.Secret) { - - initSecret := &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: m.Name, - Namespace: m.Namespace, - Labels: map[string]string{ - "app": m.Name, - }, - }, - Type: corev1.SecretTypeOpaque, - StringData: map[string]string{ - "init-cmd": dbcommons.InitORDSCMD, - }, - } + n *dbapi.SingleInstanceDatabase, req ctrl.Request) *corev1.Pod { pod := &corev1.Pod{ TypeMeta: metav1.TypeMeta{ @@ -578,116 +548,92 @@ func (r *OracleRestDataServiceReconciler) instantiatePodSpec(m *dbapi.OracleRest }, }, { - Name: "init-ords-vol", + Name: "varmount", VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: m.Name, - Optional: func() *bool { i := true; return &i }(), - Items: []corev1.KeyToPath{{ - Key: "init-cmd", - Path: "init-cmd", - }}, - }, + EmptyDir: &corev1.EmptyDirVolumeSource{}, }, }, }, - InitContainers: []corev1.Container{ - { - Name: "init-permissions", 
- Image: m.Spec.Image.PullFrom, - Command: []string{"/bin/sh", "-c", fmt.Sprintf("chown %d:%d /opt/oracle/ords/config/ords || true", int(dbcommons.ORACLE_UID), int(dbcommons.DBA_GUID))}, - SecurityContext: &corev1.SecurityContext{ - // User ID 0 means, root user - RunAsUser: func() *int64 { i := int64(0); return &i }(), - }, - VolumeMounts: []corev1.VolumeMount{{ - MountPath: "/opt/oracle/ords/config/ords", - Name: "datamount", - SubPath: strings.ToUpper(n.Spec.Sid) + "_ORDS", - }}, - }, - { + InitContainers: func() []corev1.Container { + initContainers := []corev1.Container{} + if m.Spec.Persistence.Size != "" && m.Spec.Persistence.SetWritePermissions != nil && *m.Spec.Persistence.SetWritePermissions { + initContainers = append(initContainers, corev1.Container{ + Name: "init-permissions", + Image: m.Spec.Image.PullFrom, + Command: []string{"/bin/sh", "-c", fmt.Sprintf("chown %d:%d /etc/ords/config/ || true", int(dbcommons.ORACLE_UID), int(dbcommons.DBA_GUID))}, + SecurityContext: &corev1.SecurityContext{ + // User ID 0 means, root user + RunAsUser: func() *int64 { i := int64(0); return &i }(), + }, + VolumeMounts: []corev1.VolumeMount{{ + MountPath: "/etc/ords/config/", + Name: "datamount", + }}, + }) + } + + initContainers = append(initContainers, corev1.Container{ Name: "init-ords", Image: m.Spec.Image.PullFrom, - Command: []string{"/bin/sh", "/run/secrets/init-cmd"}, - SecurityContext: &corev1.SecurityContext{ - RunAsUser: func() *int64 { i := int64(dbcommons.ORACLE_UID); return &i }(), - RunAsGroup: func() *int64 { i := int64(dbcommons.DBA_GUID); return &i }(), + Command: []string{"/bin/sh"}, + Args: []string{ + "-c", + fmt.Sprintf("while [ ! 
-f /opt/oracle/variables/%s ]; do sleep 0.5; done", "conn_string.txt"), }, VolumeMounts: []corev1.VolumeMount{ { - MountPath: "/opt/oracle/ords/config/ords", + MountPath: "/etc/ords/config/", Name: "datamount", - SubPath: strings.ToUpper(n.Spec.Sid) + "_ORDS", }, { - MountPath: "/run/secrets/init-cmd", - ReadOnly: true, - Name: "init-ords-vol", - SubPath: "init-cmd", + MountPath: "/opt/oracle/variables/", + Name: "varmount", }, }, - Env: []corev1.EnvVar{ - { - Name: "ORACLE_HOST", - Value: n.Name, - }, - { - Name: "ORACLE_PORT", - Value: "1521", - }, - { - Name: "ORACLE_SERVICE", - Value: func() string { - if m.Spec.OracleService != "" { - return m.Spec.OracleService - } - return n.Spec.Sid - }(), - }, - { - Name: "ORDS_USER", - Value: func() string { - if m.Spec.OrdsUser != "" { - return m.Spec.OrdsUser - } - return "ORDS_PUBLIC_USER" - }(), - }, + }) + return initContainers + }(), + Containers: []corev1.Container{{ + Name: m.Name, + Image: m.Spec.Image.PullFrom, + Ports: func() []corev1.ContainerPort { + ports := []corev1.ContainerPort{ { - Name: "ORDS_PWD", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: m.Spec.OrdsPassword.SecretName, - }, - Key: m.Spec.OrdsPassword.SecretKey, - }, - }, + ContainerPort: 8181, // Default application port }, - { - Name: "ORACLE_PWD", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: m.Spec.AdminPassword.SecretName, - }, - Key: m.Spec.AdminPassword.SecretKey, - }, - }, + } + if m.Spec.MongoDbApi { + ports = append(ports, corev1.ContainerPort{ + ContainerPort: 27017, // MongoDB port + }) + } + return ports + }(), + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", dbcommons.ORDSReadinessProbe}, }, }, + InitialDelaySeconds: 20, + TimeoutSeconds: 20, + PeriodSeconds: func() 
int32 { + if m.Spec.ReadinessCheckPeriod > 0 { + return int32(m.Spec.ReadinessCheckPeriod) + } + return 60 + }(), + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/etc/ords/config/", + Name: "datamount", + }, + { + MountPath: "/opt/oracle/variables/", + Name: "varmount", + }, }, - }, - Containers: []corev1.Container{{ - Name: m.Name, - Image: m.Spec.Image.PullFrom, - Ports: []corev1.ContainerPort{{ContainerPort: 8443}}, - VolumeMounts: []corev1.VolumeMount{{ - MountPath: "/opt/oracle/ords/config/ords/", - Name: "datamount", - SubPath: strings.ToUpper(n.Spec.Sid) + "_ORDS", - }}, Env: func() []corev1.EnvVar { // After ORDS is Installed, we DELETE THE OLD ORDS Pod and create new ones ONLY USING BELOW ENV VARIABLES. return []corev1.EnvVar{ @@ -753,9 +699,9 @@ func (r *OracleRestDataServiceReconciler) instantiatePodSpec(m *dbapi.OracleRest } // Set oracleRestDataService instance as the owner and controller - ctrl.SetControllerReference(m, initSecret, r.Scheme) + // ctrl.SetControllerReference(m, initSecret, r.Scheme) ctrl.SetControllerReference(m, pod, r.Scheme) - return pod, initSecret + return pod } //############################################################################# @@ -878,14 +824,21 @@ func (r *OracleRestDataServiceReconciler) createSVC(ctx context.Context, req ctr if lbAddress == "" { lbAddress = svc.Status.LoadBalancer.Ingress[0].IP } - m.Status.DatabaseApiUrl = "https://" + lbAddress + ":" + - fmt.Sprint(svc.Spec.Ports[0].Port) + "/ords/" + n.Status.Pdbname + "/_/db-api/stable/" + m.Status.DatabaseApiUrl = "http://" + lbAddress + ":" + + fmt.Sprint(svc.Spec.Ports[0].Port) + "/ords/" + "{schema-name}" + "/_/db-api/stable/" m.Status.ServiceIP = lbAddress - m.Status.DatabaseActionsUrl = "https://" + lbAddress + ":" + + m.Status.DatabaseActionsUrl = "http://" + lbAddress + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/ords/sql-developer" if m.Status.ApexConfigured { - m.Status.ApxeUrl = "https://" + lbAddress + ":" + - 
fmt.Sprint(svc.Spec.Ports[0].Port) + "/ords/" + n.Status.Pdbname + "/apex" + m.Status.ApxeUrl = "http://" + lbAddress + ":" + + fmt.Sprint(svc.Spec.Ports[0].Port) + "/ords/apex" + } + if m.Status.MongoDbApi && len(svc.Spec.Ports) > 1 { + m.Status.MongoDbApiAccessUrl = "mongodb://[{user}:{password}@]" + lbAddress + ":" + + fmt.Sprint(svc.Spec.Ports[1].Port) + "/{user}?" + + "authMechanism=PLAIN&authSource=$external&ssl=true&retryWrites=false&loadBalanced=true" + } else { + m.Status.MongoDbApiAccessUrl = "" } } return requeueN @@ -893,13 +846,19 @@ func (r *OracleRestDataServiceReconciler) createSVC(ctx context.Context, req ctr nodeip := dbcommons.GetNodeIp(r, ctx, req) if nodeip != "" { m.Status.ServiceIP = nodeip - m.Status.DatabaseApiUrl = "https://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + - "/ords/" + n.Status.Pdbname + "/_/db-api/stable/" - m.Status.DatabaseActionsUrl = "https://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + + m.Status.DatabaseApiUrl = "http://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + + "/ords/" + "{schema-name}" + "/_/db-api/stable/" + m.Status.DatabaseActionsUrl = "http://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/ords/sql-developer" if m.Status.ApexConfigured { - m.Status.ApxeUrl = "https://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/ords/" + - n.Status.Pdbname + "/apex" + m.Status.ApxeUrl = "http://" + nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/ords/apex" + } + if m.Status.MongoDbApi && len(svc.Spec.Ports) > 1 { + m.Status.MongoDbApiAccessUrl = "mongodb://[{user}:{password}@]" + nodeip + ":" + + fmt.Sprint(svc.Spec.Ports[1].NodePort) + "/{user}?" 
+ + "authMechanism=PLAIN&authSource=$external&ssl=true&retryWrites=false&loadBalanced=true" + } else { + m.Status.MongoDbApiAccessUrl = "" } } return requeueN @@ -941,6 +900,97 @@ func (r *OracleRestDataServiceReconciler) createPVC(ctx context.Context, req ctr return requeueN, nil } +// ############################################################################# +// +// Function for creating connection sting file +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) createConnectionString(m *dbapi.OracleRestDataService, + n *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + // Listing all the pods + readyPod, _, availableFinal, _, err := dbcommons.FindPods(r, m.Spec.Image.Version, + m.Spec.Image.PullFrom, m.Name, m.Namespace, ctx, req) + + if err != nil { + r.Log.Error(err, err.Error()) + return requeueY, nil + } + if readyPod.Name != "" { + return requeueN, nil + } + + if len(availableFinal) == 0 { + r.Log.Info("Pods are being created, currently no pods available") + return requeueY, nil + } + + // Iterate through the availableFinal (list of pods) to find out the pod whose status is updated about the init containers + // If no required pod found then requeue the reconcile request + var pod corev1.Pod + var podFound bool + for _, pod = range availableFinal { + // Check if pod status container is updated about init containers + if len(pod.Status.InitContainerStatuses) > 0 { + podFound = true + break + } + } + if !podFound { + r.Log.Info("No pod has its status updated about init containers. 
Requeueing...") + return requeueY, nil + } + + lastInitContIndex := len(pod.Status.InitContainerStatuses) - 1 + + // If InitContainerStatuses[].Ready is true, it means that the init container is successful + if pod.Status.InitContainerStatuses[lastInitContIndex].Ready { + // Init container named "init-ords" has completed it's execution, hence return and don't requeue + return requeueN, nil + } + + if pod.Status.InitContainerStatuses[lastInitContIndex].State.Running == nil { + // Init container named "init-ords" is not running, so waiting for it to come in running state requeueing the reconcile request + r.Log.Info("Waiting for init-ords to come in running state...") + return requeueY, nil + } + + r.Log.Info("Creating Connection String file...") + + // Querying the secret + r.Log.Info("Querying the database secret ...") + secret := &corev1.Secret{} + err = r.Get(ctx, types.NamespacedName{Name: m.Spec.AdminPassword.SecretName, Namespace: m.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + r.Log.Info("Secret not found") + m.Status.Status = dbcommons.StatusError + r.Status().Update(ctx, m) + return requeueY, nil + } + r.Log.Error(err, "Unable to get the secret. 
Requeueing..") + return requeueY, nil + } + + // Execing into the pods and creating the Connection String + adminPassword := string(secret.Data[m.Spec.AdminPassword.SecretKey]) + + _, err = dbcommons.ExecCommand(r, r.Config, pod.Name, pod.Namespace, "init-ords", + ctx, req, true, "bash", "-c", + fmt.Sprintf("mkdir -p /opt/oracle/variables && echo %[1]s > /opt/oracle/variables/%[2]s", + fmt.Sprintf(dbcommons.DbConnectString, adminPassword, n.Name, n.Status.Pdbname), + "conn_string.txt")) + + if err != nil { + r.Log.Error(err, err.Error()) + r.Log.Error(err, "Failed to create connection string in new "+m.Name+" POD", "pod.Namespace", pod.Namespace, "POD.Name", pod.Name) + return requeueY, nil + } + r.Log.Info("Succesfully Created connection string in new "+m.Name+" POD", "POD.NAME : ", pod.Name) + + return requeueN, nil +} + // ############################################################################# // // Create the requested POD replicas @@ -980,16 +1030,24 @@ func (r *OracleRestDataServiceReconciler) createPods(m *dbapi.OracleRestDataServ } else if replicasFound < replicasReq { // Create New Pods , Name of Pods are generated Randomly for i := replicasFound; i < replicasReq; i++ { - pod, initSecret := r.instantiatePodSpec(m, n) - // Check if init-secret is present - err := r.Get(ctx, types.NamespacedName{Name: m.Name, Namespace: m.Namespace}, &corev1.Secret{}) - if err != nil && apierrors.IsNotFound(err) { - log.Info("Creating a new secret", "name", m.Name) - if err = r.Create(ctx, initSecret); err != nil { - log.Error(err, "Failed to create secret ", "Namespace", initSecret.Namespace, "Name", initSecret.Name) + // Obtain admin password of the referred database + adminPasswordSecret := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: n.Spec.AdminPassword.SecretName, Namespace: n.Namespace}, adminPasswordSecret) + if err != nil { + if apierrors.IsNotFound(err) { + m.Status.Status = dbcommons.StatusError + eventReason := "Database Password" + 
eventMsg := "password secret " + m.Spec.AdminPassword.SecretName + " not found, retrying..." + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) + r.Log.Info(eventMsg) return requeueY } + log.Error(err, err.Error()) + return requeueY } + + pod := r.instantiatePodSpec(m, n, req) + log.Info("Creating a new "+m.Name+" POD", "POD.Namespace", pod.Namespace, "POD.Name", pod.Name) err = r.Create(ctx, pod) if err != nil { @@ -1024,6 +1082,17 @@ func (r *OracleRestDataServiceReconciler) createPods(m *dbapi.OracleRestDataServ } } + // Creating conn string in pods + result, err := r.createConnectionString(m, n, ctx, req) + + if err != nil { + return requeueY + } + if result.Requeue { + log.Info("Requeued at connection string creation") + return requeueY + } + m.Status.Replicas = m.Spec.Replicas return requeueN @@ -1241,97 +1310,14 @@ func (r *OracleRestDataServiceReconciler) cleanupOracleRestDataService(req ctrl. // ############################################################################# func (r *OracleRestDataServiceReconciler) configureApex(m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase, sidbReadyPod corev1.Pod, ordsReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) ctrl.Result { - log := r.Log.WithValues("configureApex", req.NamespacedName) + log := r.Log.WithValues("verifyApex", req.NamespacedName) - if m.Spec.ApexPassword.SecretName == "" { - m.Status.ApexConfigured = false - return requeueN - } if m.Status.ApexConfigured { return requeueN } - apexPasswordSecret := &corev1.Secret{} - err := r.Get(ctx, types.NamespacedName{Name: m.Spec.ApexPassword.SecretName, Namespace: m.Namespace}, apexPasswordSecret) - if err != nil { - if apierrors.IsNotFound(err) { - m.Status.Status = dbcommons.StatusError - eventReason := "Apex Password" - eventMsg := "password secret " + m.Spec.ApexPassword.SecretName + " not found, retrying..." 
- r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) - r.Log.Info(eventMsg) - return requeueY - } - log.Error(err, err.Error()) - return requeueY - } - // APEX_LISTENER , APEX_REST_PUBLIC_USER , APEX_PUBLIC_USER passwords - apexPassword := string(apexPasswordSecret.Data[m.Spec.ApexPassword.SecretKey]) - - if !n.Status.ApexInstalled { - m.Status.Status = dbcommons.StatusUpdating - result := r.installApex(m, n, ordsReadyPod, apexPassword, ctx, req) - if result.Requeue { - log.Info("Reconcile requeued because apex installation failed") - return result - } - } else { - // Alter Apex Users - log.Info("Alter APEX Users") - _, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", - ctx, req, true, "bash", "-c", fmt.Sprintf("echo -e \"%s\" | %s", - fmt.Sprintf(dbcommons.AlterApexUsers, apexPassword, n.Spec.Pdbname), dbcommons.SQLPlusCLI)) - if err != nil { - log.Error(err, err.Error()) - return requeueY - } - } - - // Set Apex users in apex_rt,apex_al,apex files - out, err := dbcommons.ExecCommand(r, r.Config, ordsReadyPod.Name, ordsReadyPod.Namespace, "", ctx, req, true, "bash", "-c", - fmt.Sprintf(dbcommons.SetApexUsers, apexPassword)) - log.Info("SetApexUsers Output: \n" + out) - if strings.Contains(strings.ToUpper(out), "ERROR") { - return requeueY - } - if err != nil { - log.Info(err.Error()) - if strings.Contains(strings.ToUpper(err.Error()), "ERROR") { - return requeueY - } - } - - // ORDS needs to be restarted to configure APEX - r.Log.Info("Restarting ORDS Pod to complete APEX configuration: " + ordsReadyPod.Name) - var gracePeriodSeconds int64 = 0 - policy := metav1.DeletePropagationForeground - err = r.Delete(ctx, &ordsReadyPod, &client.DeleteOptions{ - GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) - if err != nil { - r.Log.Error(err, err.Error()) - } - - m.Status.ApexConfigured = true - r.Status().Update(ctx, m) - eventReason := "Apex Configuration" - eventMsg := "configuration of Apex 
completed!" - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - log.Info(eventMsg) - - // Cannot return requeue as the secrets will be deleted if keepSecert is false, which cause problem in pod restart - return requeueY -} - -// ############################################################################# -// -// Install APEX in SIDB -// -// ############################################################################# -func (r *OracleRestDataServiceReconciler) installApex(m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase, - ordsReadyPod corev1.Pod, apexPassword string, ctx context.Context, req ctrl.Request) ctrl.Result { - log := r.Log.WithValues("installApex", req.NamespacedName) - // Obtain admin password of the referred database + adminPasswordSecret := &corev1.Secret{} err := r.Get(ctx, types.NamespacedName{Name: m.Spec.AdminPassword.SecretName, Namespace: m.Namespace}, adminPasswordSecret) if err != nil { @@ -1348,23 +1334,8 @@ func (r *OracleRestDataServiceReconciler) installApex(m *dbapi.OracleRestDataSer } sidbPassword := string(adminPasswordSecret.Data[m.Spec.AdminPassword.SecretKey]) - // Status Updation - m.Status.Status = dbcommons.StatusUpdating - r.Status().Update(ctx, m) - eventReason := "Apex Installation" - eventMsg := "performing install of Apex in database " + m.Spec.DatabaseRef - r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) - - //Install Apex in SIDB ready pod - out, err := dbcommons.ExecCommand(r, r.Config, ordsReadyPod.Name, ordsReadyPod.Namespace, "", ctx, req, true, "bash", "-c", - fmt.Sprintf(dbcommons.InstallApexInContainer, apexPassword, sidbPassword, n.Status.Pdbname)) - if err != nil { - log.Info(err.Error()) - } - log.Info("Apex installation output : \n" + out) - // Checking if Apex is installed successfully or not - out, err = dbcommons.ExecCommand(r, r.Config, ordsReadyPod.Name, ordsReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + out, err := dbcommons.ExecCommand(r, 
r.Config, ordsReadyPod.Name, ordsReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf(dbcommons.IsApexInstalled, sidbPassword, n.Status.Pdbname)) if err != nil { log.Error(err, err.Error()) @@ -1374,19 +1345,22 @@ func (r *OracleRestDataServiceReconciler) installApex(m *dbapi.OracleRestDataSer apexInstalled := "APEXVERSION:" if !strings.Contains(out, apexInstalled) { - eventReason = "Apex Installation" - eventMsg = "Unable to determine Apex version, retrying install..." + eventReason := "Apex Verification" + eventMsg := "Unable to determine Apex version, retrying..." r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) return requeueY } m.Status.Status = dbcommons.StatusReady - eventReason = "Apex Installation" + eventReason := "Apex Verification" outArr := strings.Split(out, apexInstalled) - eventMsg = "installation of Apex " + strings.TrimSpace(outArr[len(outArr)-1]) + " completed" + eventMsg := "Verification of Apex " + strings.TrimSpace(outArr[len(outArr)-1]) + " completed" r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) n.Status.ApexInstalled = true + m.Status.ApexConfigured = true r.Status().Update(ctx, n) + r.Status().Update(ctx, m) + return requeueN } @@ -1423,20 +1397,71 @@ func (r *OracleRestDataServiceReconciler) deleteSecrets(m *dbapi.OracleRestDataS } } } +} - if !*m.Spec.ApexPassword.KeepSecret { - // Fetch apexPassword Secret - apexPasswordSecret := &corev1.Secret{} - err := r.Get(ctx, types.NamespacedName{Name: m.Spec.ApexPassword.SecretName, Namespace: m.Namespace}, apexPasswordSecret) - if err == nil { - //Delete APEX Password Secret . 
- err := r.Delete(ctx, apexPasswordSecret, &client.DeleteOptions{}) - if err == nil { - log.Info("APEX password secret deleted : " + apexPasswordSecret.Name) +// ############################################################################# +// +// Enable MongoDB API Support +// +// ############################################################################# +func (r *OracleRestDataServiceReconciler) enableMongoDB(m *dbapi.OracleRestDataService, n *dbapi.SingleInstanceDatabase, + sidbReadyPod corev1.Pod, ordsReadyPod corev1.Pod, ctx context.Context, req ctrl.Request) ctrl.Result { + log := r.Log.WithValues("enableMongoDB", req.NamespacedName) + + if (m.Spec.MongoDbApi && !m.Status.MongoDbApi) || // setting MongoDbApi to true + (!m.Spec.MongoDbApi && m.Status.MongoDbApi) { // setting MongoDbApi to false + m.Status.Status = dbcommons.StatusUpdating + + out, err := dbcommons.ExecCommand(r, r.Config, ordsReadyPod.Name, ordsReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.ConfigMongoDb, strconv.FormatBool(m.Spec.MongoDbApi))) + log.Info("configMongoDB Output: \n" + out) + + if strings.Contains(strings.ToUpper(out), "ERROR") { + return requeueY + } + if err != nil { + log.Info(err.Error()) + if strings.Contains(strings.ToUpper(err.Error()), "ERROR") { + return requeueY } } + + m.Status.MongoDbApi = m.Spec.MongoDbApi + m.Status.Status = dbcommons.StatusReady + r.Status().Update(ctx, m) + eventReason := "MongoDB-API Config" + eventMsg := "configuration of MongoDb API completed!" 
+ r.Recorder.Eventf(m, corev1.EventTypeNormal, eventReason, eventMsg) + log.Info(eventMsg) + + // ORDS service is resatrted + r.Log.Info("Restarting ORDS Service : " + m.Name) + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: m.Name, Namespace: m.Namespace}, + } + var gracePeriodSeconds int64 = 0 + policy := metav1.DeletePropagationForeground + err = r.Delete(ctx, svc, &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + if err != nil { + r.Log.Error(err, "Failed to delete ORDS service", "Service Name", m.Name) + return requeueY + } + + // ORDS needs to be restarted to configure MongoDB API + r.Log.Info("Restarting ORDS Pod after configuring MongoDb API : " + ordsReadyPod.Name) + err = r.Delete(ctx, &ordsReadyPod, &client.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, PropagationPolicy: &policy}) + if err != nil { + r.Log.Error(err, err.Error()) + } + return requeueY + + } else { + log.Info("MongoDB Already Configured") } + return requeueN } // ############################################################################# diff --git a/controllers/database/ordssrvs_controller.go b/controllers/database/ordssrvs_controller.go new file mode 100644 index 00000000..14c7f46e --- /dev/null +++ b/controllers/database/ordssrvs_controller.go @@ -0,0 +1,1116 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + // dbapi "example.com/oracle-ords-operator/api/v1" + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" +) + +// Definitions of Standards +const ( + ordsSABase = "/opt/oracle/sa" + serviceHTTPPortName = "svc-http-port" + serviceHTTPSPortName = "svc-https-port" + serviceMongoPortName = "svc-mongo-port" + targetHTTPPortName = "pod-http-port" + targetHTTPSPortName = "pod-https-port" + targetMongoPortName = "pod-mongo-port" + globalConfigMapName = "settings-global" + poolConfigPreName = "settings-" // Append PoolName + controllerLabelKey = "oracle.com/ords-operator-filter" + controllerLabelVal = "oracle-database-operator" + specHashLabel = "oracle.com/ords-operator-spec-hash" +) + +// Definitions to manage status conditions +const ( + // typeAvailableORDS represents the status of the Workload reconciliation + typeAvailableORDS = "Available" + // typeUnsyncedORDS represents the status used when the configuration has changed but the Workload has not been restarted. 
+ typeUnsyncedORDS = "Unsynced" +) + +// Trigger a restart of Pods on Config Changes +var RestartPods bool = false + +// OrdsSrvsReconciler reconciles a OrdsSrvs object +type OrdsSrvsReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder +} + +//+kubebuilder:rbac:groups=database.oracle.com,resources=ordssrvs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=ordssrvs/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=ordssrvs/finalizers,verbs=update +//+kubebuilder:rbac:groups=core,resources=events,verbs=create;patch +//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=configmaps/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=secrets/status,verbs=get +//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=services/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=deployments/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=apps,resources=daemonsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=daemonsets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=statefulsets/status,verbs=get;update;patch + +// SetupWithManager sets up the controller with the Manager. +func (r *OrdsSrvsReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.OrdsSrvs{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). 
+ Owns(&appsv1.Deployment{}). + Owns(&appsv1.StatefulSet{}). + Owns(&appsv1.DaemonSet{}). + Owns(&corev1.Service{}). + Complete(r) +} + +func (r *OrdsSrvsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logr := log.FromContext(ctx) + ords := &dbapi.OrdsSrvs{} + + // Check if resource exists or was deleted + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + if apierrors.IsNotFound(err) { + logr.Info("Resource deleted") + return ctrl.Result{}, nil + } + logr.Error(err, "Error retrieving resource") + return ctrl.Result{Requeue: true, RequeueAfter: time.Minute}, err + } + + // Set the status as Unknown when no status are available + if ords.Status.Conditions == nil || len(ords.Status.Conditions) == 0 { + condition := metav1.Condition{Type: typeUnsyncedORDS, Status: metav1.ConditionUnknown, Reason: "Reconciling", Message: "Starting reconciliation"} + if err := r.SetStatus(ctx, req, ords, condition); err != nil { + return ctrl.Result{}, err + } + } + + // ConfigMap - Init Script + if err := r.ConfigMapReconcile(ctx, ords, ords.Name+"-"+"init-script", 0); err != nil { + logr.Error(err, "Error in ConfigMapReconcile (init-script)") + return ctrl.Result{}, err + } + + // ConfigMap - Global Settings + if err := r.ConfigMapReconcile(ctx, ords, ords.Name+"-"+globalConfigMapName, 0); err != nil { + logr.Error(err, "Error in ConfigMapReconcile (Global)") + return ctrl.Result{}, err + } + + // ConfigMap - Pool Settings + definedPools := make(map[string]bool) + for i := 0; i < len(ords.Spec.PoolSettings); i++ { + poolName := strings.ToLower(ords.Spec.PoolSettings[i].PoolName) + poolConfigMapName := ords.Name + "-" + poolConfigPreName + poolName + if definedPools[poolConfigMapName] { + return ctrl.Result{}, errors.New("poolName: " + poolName + " is not unique") + } + definedPools[poolConfigMapName] = true + if err := r.ConfigMapReconcile(ctx, ords, poolConfigMapName, i); err != nil { + logr.Error(err, "Error in ConfigMapReconcile 
(Pools)") + return ctrl.Result{}, err + } + } + if err := r.ConfigMapDelete(ctx, req, ords, definedPools); err != nil { + logr.Error(err, "Error in ConfigMapDelete (Pools)") + return ctrl.Result{}, err + } + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + logr.Error(err, "Failed to re-fetch") + return ctrl.Result{}, err + } + + // // Secrets - Pool Settings + // for i := 0; i < len(ords.Spec.PoolSettings); i++ { + // if err := r.SecretsReconcile(ctx, ords, i); err != nil { + // logr.Error(err, "Error in SecretsReconcile (Pools)") + // return ctrl.Result{}, err + // } + // } + + // Set the Type as Unsynced when a pod restart is required + if RestartPods { + condition := metav1.Condition{Type: typeUnsyncedORDS, Status: metav1.ConditionTrue, Reason: "Unsynced", Message: "Configurations have changed"} + if err := r.SetStatus(ctx, req, ords, condition); err != nil { + return ctrl.Result{}, err + } + } + + // Workloads + if err := r.WorkloadReconcile(ctx, req, ords, ords.Spec.WorkloadType); err != nil { + logr.Error(err, "Error in WorkloadReconcile") + return ctrl.Result{}, err + } + if err := r.WorkloadDelete(ctx, req, ords, ords.Spec.WorkloadType); err != nil { + logr.Error(err, "Error in WorkloadDelete") + return ctrl.Result{}, err + } + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + logr.Error(err, "Failed to re-fetch") + return ctrl.Result{}, err + } + + // Service + if err := r.ServiceReconcile(ctx, ords); err != nil { + logr.Error(err, "Error in ServiceReconcile") + return ctrl.Result{}, err + } + + // Set the Type as Available when a pod restart is not required + if !RestartPods { + condition := metav1.Condition{Type: typeAvailableORDS, Status: metav1.ConditionTrue, Reason: "Available", Message: "Workload in Sync"} + if err := r.SetStatus(ctx, req, ords, condition); err != nil { + return ctrl.Result{}, err + } + } + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + logr.Error(err, "Failed to re-fetch") + return 
ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +/************************************************ + * Status + *************************************************/ +func (r *OrdsSrvsReconciler) SetStatus(ctx context.Context, req ctrl.Request, ords *dbapi.OrdsSrvs, statusCondition metav1.Condition) error { + logr := log.FromContext(ctx).WithName("SetStatus") + + // Fetch before Status Update + if err := r.Get(ctx, req.NamespacedName, ords); err != nil { + logr.Error(err, "Failed to re-fetch") + return err + } + var readyWorkload int32 + var desiredWorkload int32 + switch ords.Spec.WorkloadType { + //nolint:goconst + case "StatefulSet": + workload := &appsv1.StatefulSet{} + if err := r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, workload); err != nil { + logr.Info("StatefulSet not ready") + } + readyWorkload = workload.Status.ReadyReplicas + desiredWorkload = workload.Status.Replicas + //nolint:goconst + case "DaemonSet": + workload := &appsv1.DaemonSet{} + if err := r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, workload); err != nil { + logr.Info("DaemonSet not ready") + } + readyWorkload = workload.Status.NumberReady + desiredWorkload = workload.Status.DesiredNumberScheduled + default: + workload := &appsv1.Deployment{} + if err := r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, workload); err != nil { + logr.Info("Deployment not ready") + } + readyWorkload = workload.Status.ReadyReplicas + desiredWorkload = workload.Status.Replicas + } + + var workloadStatus string + if readyWorkload == 0 { + workloadStatus = "Preparing" + } else if readyWorkload == desiredWorkload { + workloadStatus = "Healthy" + ords.Status.OrdsInstalled = true + } else { + workloadStatus = "Progressing" + } + + mongoPort := int32(0) + if ords.Spec.GlobalSettings.MongoEnabled { + mongoPort = *ords.Spec.GlobalSettings.MongoPort + } + + meta.SetStatusCondition(&ords.Status.Conditions, 
statusCondition) + ords.Status.Status = workloadStatus + ords.Status.WorkloadType = ords.Spec.WorkloadType + ords.Status.ORDSVersion = strings.Split(ords.Spec.Image, ":")[1] + ords.Status.HTTPPort = ords.Spec.GlobalSettings.StandaloneHTTPPort + ords.Status.HTTPSPort = ords.Spec.GlobalSettings.StandaloneHTTPSPort + ords.Status.MongoPort = mongoPort + ords.Status.RestartRequired = RestartPods + if err := r.Status().Update(ctx, ords); err != nil { + logr.Error(err, "Failed to update Status") + return err + } + return nil +} + +/************************************************ + * ConfigMaps + *************************************************/ +func (r *OrdsSrvsReconciler) ConfigMapReconcile(ctx context.Context, ords *dbapi.OrdsSrvs, configMapName string, poolIndex int) (err error) { + logr := log.FromContext(ctx).WithName("ConfigMapReconcile") + desiredConfigMap := r.ConfigMapDefine(ctx, ords, configMapName, poolIndex) + + // Create if ConfigMap not found + definedConfigMap := &corev1.ConfigMap{} + if err = r.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: ords.Namespace}, definedConfigMap); err != nil { + if apierrors.IsNotFound(err) { + if err := r.Create(ctx, desiredConfigMap); err != nil { + return err + } + logr.Info("Created: " + configMapName) + RestartPods = true + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Create", "ConfigMap %s Created", configMapName) + // Requery for comparison + if err := r.Get(ctx, types.NamespacedName{Name: configMapName, Namespace: ords.Namespace}, definedConfigMap); err != nil { + return err + } + } else { + return err + } + } + if !equality.Semantic.DeepEqual(definedConfigMap.Data, desiredConfigMap.Data) { + if err = r.Update(ctx, desiredConfigMap); err != nil { + return err + } + logr.Info("Updated: " + configMapName) + RestartPods = true + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "ConfigMap %s Updated", configMapName) + } + return nil +} + +/************************************************ + * 
Secrets - TODO (Watch and set RestartPods) + *************************************************/ +// func (r *OrdsSrvsReconciler) SecretsReconcile(ctx context.Context, ords *dbapi.OrdsSrvs, poolIndex int) (err error) { +// logr := log.FromContext(ctx).WithName("SecretsReconcile") +// definedSecret := &corev1.Secret{} + +// // Want to set ownership on the Secret for watching; also detects if TNS_ADMIN is needed. +// if ords.Spec.PoolSettings[i].DBSecret != nil { +// } +// if ords.Spec.PoolSettings[i].DBAdminUserSecret != nil { +// } +// if ords.Spec.PoolSettings[i].DBCDBAdminUserSecret != nil { +// } +// if ords.Spec.PoolSettings[i].TNSAdminSecret != nil { +// } +// if ords.Spec.PoolSettings[i].DBWalletSecret != nil { +// } + +// if ords.Spec.PoolSettings[i].TNSAdminSecret != nil { +// tnsSecretName := ords.Spec.PoolSettings[i].TNSAdminSecret.SecretName +// definedSecret := &corev1.Secret{} +// if err = r.Get(ctx, types.NamespacedName{Name: tnsSecretName, Namespace: ords.Namespace}, definedSecret); err != nil { +// ojdbcPropertiesData, ok := secret.Data["ojdbc.properties"] +// if ok { +// if err = r.Update(ctx, desiredConfigMap); err != nil { +// return err +// } +// } +// } +// } + +// return nil +// } + +/************************************************ + * Workloads + *************************************************/ +func (r *OrdsSrvsReconciler) WorkloadReconcile(ctx context.Context, req ctrl.Request, ords *dbapi.OrdsSrvs, kind string) (err error) { + logr := log.FromContext(ctx).WithName("WorkloadReconcile") + objectMeta := objectMetaDefine(ords, ords.Name) + selector := selectorDefine(ords) + template := r.podTemplateSpecDefine(ords, ctx, req) + + var desiredWorkload client.Object + var desiredSpecHash string + var definedSpecHash string + + switch kind { + case "StatefulSet": + desiredWorkload = &appsv1.StatefulSet{ + ObjectMeta: objectMeta, + Spec: appsv1.StatefulSetSpec{ + Replicas: &ords.Spec.Replicas, + Selector: &selector, + Template: template, + }, + } 
+ desiredSpecHash = generateSpecHash(desiredWorkload.(*appsv1.StatefulSet).Spec) + desiredWorkload.(*appsv1.StatefulSet).ObjectMeta.Labels[specHashLabel] = desiredSpecHash + case "DaemonSet": + desiredWorkload = &appsv1.DaemonSet{ + ObjectMeta: objectMeta, + Spec: appsv1.DaemonSetSpec{ + Selector: &selector, + Template: template, + }, + } + desiredSpecHash = generateSpecHash(desiredWorkload.(*appsv1.DaemonSet).Spec) + desiredWorkload.(*appsv1.DaemonSet).ObjectMeta.Labels[specHashLabel] = desiredSpecHash + default: + desiredWorkload = &appsv1.Deployment{ + ObjectMeta: objectMeta, + Spec: appsv1.DeploymentSpec{ + Replicas: &ords.Spec.Replicas, + Selector: &selector, + Template: template, + }, + } + desiredSpecHash = generateSpecHash(desiredWorkload.(*appsv1.Deployment).Spec) + desiredWorkload.(*appsv1.Deployment).ObjectMeta.Labels[specHashLabel] = desiredSpecHash + } + + if err := ctrl.SetControllerReference(ords, desiredWorkload, r.Scheme); err != nil { + return err + } + + definedWorkload := reflect.New(reflect.TypeOf(desiredWorkload).Elem()).Interface().(client.Object) + if err = r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, definedWorkload); err != nil { + if apierrors.IsNotFound(err) { + if err := r.Create(ctx, desiredWorkload); err != nil { + condition := metav1.Condition{ + Type: typeAvailableORDS, + Status: metav1.ConditionFalse, + Reason: "Reconciling", + Message: fmt.Sprintf("Failed to create %s for the custom resource (%s): (%s)", kind, ords.Name, err), + } + if statusErr := r.SetStatus(ctx, req, ords, condition); statusErr != nil { + return statusErr + } + return err + } + logr.Info("Created: " + kind) + RestartPods = false + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Create", "Created %s", kind) + + return nil + } else { + return err + } + } + + definedLabelsField := reflect.ValueOf(definedWorkload).Elem().FieldByName("ObjectMeta").FieldByName("Labels") + if definedLabelsField.IsValid() { + specHashValue := 
definedLabelsField.MapIndex(reflect.ValueOf(specHashLabel)) + if specHashValue.IsValid() { + definedSpecHash = specHashValue.Interface().(string) + } else { + return err + } + } + + if desiredSpecHash != definedSpecHash { + logr.Info("Syncing Workload " + kind + " with new configuration") + if err := r.Client.Update(ctx, desiredWorkload); err != nil { + return err + } + RestartPods = true + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "Updated %s", kind) + } + + if RestartPods && ords.Spec.ForceRestart { + logr.Info("Cycling: " + kind) + labelsField := reflect.ValueOf(desiredWorkload).Elem().FieldByName("Spec").FieldByName("Template").FieldByName("ObjectMeta").FieldByName("Labels") + if labelsField.IsValid() { + labels := labelsField.Interface().(map[string]string) + labels["configMapChanged"] = time.Now().Format("20060102T150405Z") + labelsField.Set(reflect.ValueOf(labels)) + if err := r.Update(ctx, desiredWorkload); err != nil { + return err + } + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Restart", "Restarted %s", kind) + RestartPods = false + } + } + + return nil +} + +// Service +func (r *OrdsSrvsReconciler) ServiceReconcile(ctx context.Context, ords *dbapi.OrdsSrvs) (err error) { + logr := log.FromContext(ctx).WithName("ServiceReconcile") + + HTTPport := *ords.Spec.GlobalSettings.StandaloneHTTPPort + HTTPSport := *ords.Spec.GlobalSettings.StandaloneHTTPSPort + MongoPort := *ords.Spec.GlobalSettings.MongoPort + + desiredService := r.ServiceDefine(ctx, ords, HTTPport, HTTPSport, MongoPort) + + definedService := &corev1.Service{} + if err = r.Get(ctx, types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, definedService); err != nil { + if apierrors.IsNotFound(err) { + if err := r.Create(ctx, desiredService); err != nil { + return err + } + logr.Info("Created: Service") + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Create", "Service %s Created", ords.Name) + // Requery for comparison + if err := r.Get(ctx, 
types.NamespacedName{Name: ords.Name, Namespace: ords.Namespace}, definedService); err != nil { + return err + } + } else { + return err + } + } + + deisredPortCount := len(desiredService.Spec.Ports) + definedPortCount := len(definedService.Spec.Ports) + + if deisredPortCount != definedPortCount { + if err := r.Update(ctx, desiredService); err != nil { + return err + } + } + + for _, existingPort := range definedService.Spec.Ports { + if existingPort.Name == serviceHTTPPortName { + if existingPort.Port != HTTPport { + if err := r.Update(ctx, desiredService); err != nil { + return err + } + logr.Info("Updated HTTP Service Port: " + existingPort.Name) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "Service HTTP Port %s Updated", existingPort.Name) + } + } + if existingPort.Name == serviceHTTPSPortName { + if existingPort.Port != HTTPSport { + if err := r.Update(ctx, desiredService); err != nil { + return err + } + logr.Info("Updated HTTPS Service Port: " + existingPort.Name) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "Service HTTPS Port %s Updated", existingPort.Name) + } + } + if existingPort.Name == serviceMongoPortName { + if existingPort.Port != MongoPort { + if err := r.Update(ctx, desiredService); err != nil { + return err + } + logr.Info("Updated Mongo Service Port: " + existingPort.Name) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Update", "Service Mongo Port %s Updated", existingPort.Name) + } + } + } + return nil +} + +/* +************************************************ + - Definers + +************************************************* +*/ +func objectMetaDefine(ords *dbapi.OrdsSrvs, name string) metav1.ObjectMeta { + labels := getLabels(ords.Name) + return metav1.ObjectMeta{ + Name: name, + Namespace: ords.Namespace, + Labels: labels, + } +} + +func selectorDefine(ords *dbapi.OrdsSrvs) metav1.LabelSelector { + labels := getLabels(ords.Name) + return metav1.LabelSelector{ + MatchLabels: labels, + } +} + +func (r 
*OrdsSrvsReconciler) podTemplateSpecDefine(ords *dbapi.OrdsSrvs, ctx context.Context, req ctrl.Request) corev1.PodTemplateSpec { + labels := getLabels(ords.Name) + specVolumes, specVolumeMounts := VolumesDefine(ords) + + envPorts := []corev1.ContainerPort{ + { + ContainerPort: *ords.Spec.GlobalSettings.StandaloneHTTPPort, + Name: targetHTTPPortName, + }, + { + ContainerPort: *ords.Spec.GlobalSettings.StandaloneHTTPSPort, + Name: targetHTTPSPortName, + }, + } + + if ords.Spec.GlobalSettings.MongoEnabled { + mongoPort := corev1.ContainerPort{ + ContainerPort: *ords.Spec.GlobalSettings.MongoPort, + Name: targetMongoPortName, + } + envPorts = append(envPorts, mongoPort) + } + + // Environment From Source + podSpecTemplate := + corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Volumes: specVolumes, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: &[]bool{true}[0], + FSGroup: &[]int64{54321}[0], + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + InitContainers: []corev1.Container{{ + Image: ords.Spec.Image, + Name: ords.Name + "-init", + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: securityContextDefine(), + Command: []string{"sh", "-c", ordsSABase + "/bin/init_script.sh"}, + Env: r.envDefine(ords, true, ctx), + VolumeMounts: specVolumeMounts, + }}, + Containers: []corev1.Container{{ + Image: ords.Spec.Image, + Name: ords.Name, + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: securityContextDefine(), + Ports: envPorts, + Command: []string{"/bin/bash", "-c", "ords --config $ORDS_CONFIG serve --apex-images /opt/oracle/apex/$APEX_VER/images --debug"}, + Env: r.envDefine(ords, false, ctx), + VolumeMounts: specVolumeMounts, + }}}, + } + + return podSpecTemplate +} + +// Volumes +func VolumesDefine(ords *dbapi.OrdsSrvs) ([]corev1.Volume, []corev1.VolumeMount) { + // Initialize the slice to hold specifications + var volumes 
[]corev1.Volume + var volumeMounts []corev1.VolumeMount + + // SecretHelper + secretHelperVolume := volumeBuild(ords.Name+"-"+"init-script", "ConfigMap", 0770) + secretHelperVolumeMount := volumeMountBuild(ords.Name+"-"+"init-script", ordsSABase+"/bin", true) + + volumes = append(volumes, secretHelperVolume) + volumeMounts = append(volumeMounts, secretHelperVolumeMount) + + // Build volume specifications for globalSettings + standaloneVolume := volumeBuild("standalone", "EmptyDir") + standaloneVolumeMount := volumeMountBuild("standalone", ordsSABase+"/config/global/standalone/", false) + + globalWalletVolume := volumeBuild("sa-wallet-global", "EmptyDir") + globalWalletVolumeMount := volumeMountBuild("sa-wallet-global", ordsSABase+"/config/global/wallet/", false) + + globalLogVolume := volumeBuild("sa-log-global", "EmptyDir") + globalLogVolumeMount := volumeMountBuild("sa-log-global", ordsSABase+"/log/global/", false) + + globalConfigVolume := volumeBuild(ords.Name+"-"+globalConfigMapName, "ConfigMap") + globalConfigVolumeMount := volumeMountBuild(ords.Name+"-"+globalConfigMapName, ordsSABase+"/config/global/", true) + + globalDocRootVolume := volumeBuild("sa-doc-root", "EmptyDir") + globalDocRootVolumeMount := volumeMountBuild("sa-doc-root", ordsSABase+"/config/global/doc_root/", false) + + volumes = append(volumes, standaloneVolume, globalWalletVolume, globalLogVolume, globalConfigVolume, globalDocRootVolume) + volumeMounts = append(volumeMounts, standaloneVolumeMount, globalWalletVolumeMount, globalLogVolumeMount, globalConfigVolumeMount, globalDocRootVolumeMount) + + if ords.Spec.GlobalSettings.CertSecret != nil { + globalCertVolume := volumeBuild(ords.Spec.GlobalSettings.CertSecret.SecretName, "Secret") + globalCertVolumeMount := volumeMountBuild(ords.Spec.GlobalSettings.CertSecret.SecretName, ordsSABase+"/config/certficate/", true) + + volumes = append(volumes, globalCertVolume) + volumeMounts = append(volumeMounts, globalCertVolumeMount) + } + + // Build 
volume specifications for each pool in poolSettings + definedWalletSecret := make(map[string]bool) + definedTNSSecret := make(map[string]bool) + for i := 0; i < len(ords.Spec.PoolSettings); i++ { + poolName := strings.ToLower(ords.Spec.PoolSettings[i].PoolName) + + poolWalletName := "sa-wallet-" + poolName + poolWalletVolume := volumeBuild(poolWalletName, "EmptyDir") + poolWalletVolumeMount := volumeMountBuild(poolWalletName, ordsSABase+"/config/databases/"+poolName+"/wallet/", false) + + poolConfigName := ords.Name + "-" + poolConfigPreName + poolName + poolConfigVolume := volumeBuild(poolConfigName, "ConfigMap") + poolConfigVolumeMount := volumeMountBuild(poolConfigName, ordsSABase+"/config/databases/"+poolName+"/", true) + + volumes = append(volumes, poolWalletVolume, poolConfigVolume) + volumeMounts = append(volumeMounts, poolWalletVolumeMount, poolConfigVolumeMount) + + if ords.Spec.PoolSettings[i].DBWalletSecret != nil { + walletSecretName := ords.Spec.PoolSettings[i].DBWalletSecret.SecretName + if !definedWalletSecret[walletSecretName] { + // Only create the volume once + poolDBWalletVolume := volumeBuild(walletSecretName, "Secret") + volumes = append(volumes, poolDBWalletVolume) + definedWalletSecret[walletSecretName] = true + } + poolDBWalletVolumeMount := volumeMountBuild(walletSecretName, ordsSABase+"/config/databases/"+poolName+"/network/admin/", true) + volumeMounts = append(volumeMounts, poolDBWalletVolumeMount) + } + + if ords.Spec.PoolSettings[i].TNSAdminSecret != nil { + tnsSecretName := ords.Spec.PoolSettings[i].TNSAdminSecret.SecretName + if !definedTNSSecret[tnsSecretName] { + // Only create the volume once + poolTNSAdminVolume := volumeBuild(tnsSecretName, "Secret") + volumes = append(volumes, poolTNSAdminVolume) + definedTNSSecret[tnsSecretName] = true + } + poolTNSAdminVolumeMount := volumeMountBuild(tnsSecretName, ordsSABase+"/config/databases/"+poolName+"/network/admin/", true) + volumeMounts = append(volumeMounts, poolTNSAdminVolumeMount) 
+ } + } + return volumes, volumeMounts +} + +func volumeMountBuild(name string, path string, readOnly bool) corev1.VolumeMount { + return corev1.VolumeMount{ + Name: name, + MountPath: path, + ReadOnly: readOnly, + } +} + +func volumeBuild(name string, source string, mode ...int32) corev1.Volume { + defaultMode := int32(0660) + if len(mode) > 0 { + defaultMode = mode[0] + } + switch source { + case "ConfigMap": + return corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + DefaultMode: &defaultMode, + LocalObjectReference: corev1.LocalObjectReference{ + Name: name, + }, + }, + }, + } + case "Secret": + return corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: name, + }, + }, + } + case "EmptyDir": + return corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + } + default: + return corev1.Volume{} + } +} + +// Service +func (r *OrdsSrvsReconciler) ServiceDefine(ctx context.Context, ords *dbapi.OrdsSrvs, HTTPport int32, HTTPSport int32, MongoPort int32) *corev1.Service { + labels := getLabels(ords.Name) + + servicePorts := []corev1.ServicePort{ + { + Name: serviceHTTPPortName, + Protocol: corev1.ProtocolTCP, + Port: HTTPport, + TargetPort: intstr.FromString(targetHTTPPortName), + }, + { + Name: serviceHTTPSPortName, + Protocol: corev1.ProtocolTCP, + Port: HTTPSport, + TargetPort: intstr.FromString(targetHTTPSPortName), + }, + } + + if ords.Spec.GlobalSettings.MongoEnabled { + mongoServicePort := corev1.ServicePort{ + Name: serviceMongoPortName, + Protocol: corev1.ProtocolTCP, + Port: MongoPort, + TargetPort: intstr.FromString(targetMongoPortName), + } + servicePorts = append(servicePorts, mongoServicePort) + } + + objectMeta := objectMetaDefine(ords, ords.Name) + def := &corev1.Service{ + ObjectMeta: objectMeta, + Spec: corev1.ServiceSpec{ + Selector: labels, + Ports: servicePorts, 
+ }, + } + + // Set the ownerRef + if err := ctrl.SetControllerReference(ords, def, r.Scheme); err != nil { + return nil + } + return def +} + +func securityContextDefine() *corev1.SecurityContext { + return &corev1.SecurityContext{ + RunAsNonRoot: &[]bool{true}[0], + RunAsUser: &[]int64{54321}[0], + AllowPrivilegeEscalation: &[]bool{false}[0], + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{ + "ALL", + }, + }, + } +} + +func (r *OrdsSrvsReconciler) envDefine(ords *dbapi.OrdsSrvs, initContainer bool, ctx context.Context) []corev1.EnvVar { + envVarSecrets := []corev1.EnvVar{ + { + Name: "ORDS_CONFIG", + Value: ordsSABase + "/config", + }, + { + Name: "JAVA_TOOL_OPTIONS", + Value: "-Doracle.ml.version_check=false", + }, + } + + // Limitation case for ADB/mTLS/OraOper edge + if len(ords.Spec.PoolSettings) == 1 { + poolName := strings.ToLower(ords.Spec.PoolSettings[0].PoolName) + tnsAdmin := corev1.EnvVar{ + Name: "TNS_ADMIN", + Value: ordsSABase + "/config/databases/" + poolName + "/network/admin/", + } + envVarSecrets = append(envVarSecrets, tnsAdmin) + } + if initContainer { + for i := 0; i < len(ords.Spec.PoolSettings); i++ { + poolName := strings.ReplaceAll(strings.ToLower(ords.Spec.PoolSettings[i].PoolName), "-", "_") + + dbSecret := corev1.EnvVar{ + Name: poolName + "_dbsecret", + Value: r.CommonDecryptWithPrivKey3(ords, ords.Spec.PoolSettings[i].DBSecret.SecretName, ords.Spec.PoolSettings[i].DBSecret.PasswordKey, ctx), + } + + envVarSecrets = append(envVarSecrets, dbSecret) + + if ords.Spec.PoolSettings[i].DBAdminUserSecret.SecretName != "" { + autoUpgradeORDSEnv := corev1.EnvVar{ + Name: poolName + "_autoupgrade_ords", + Value: strconv.FormatBool(ords.Spec.PoolSettings[i].AutoUpgradeORDS), + } + autoUpgradeAPEXEnv := corev1.EnvVar{ + Name: poolName + "_autoupgrade_apex", + Value: strconv.FormatBool(ords.Spec.PoolSettings[i].AutoUpgradeAPEX), + } + + dbAdminUserSecret := corev1.EnvVar{ + Name: poolName + "_dbadminusersecret", + Value: 
r.CommonDecryptWithPrivKey3(ords, ords.Spec.PoolSettings[i].DBAdminUserSecret.SecretName, ords.Spec.PoolSettings[i].DBAdminUserSecret.PasswordKey, ctx), + } + envVarSecrets = append(envVarSecrets, dbAdminUserSecret, autoUpgradeORDSEnv, autoUpgradeAPEXEnv) + } + + if ords.Spec.PoolSettings[i].DBCDBAdminUserSecret.SecretName != "" { + + dbCDBAdminUserSecret := corev1.EnvVar{ + Name: poolName + "_dbcdbadminusersecret", + Value: r.CommonDecryptWithPrivKey3(ords, ords.Spec.PoolSettings[i].DBCDBAdminUserSecret.SecretName, ords.Spec.PoolSettings[i].DBCDBAdminUserSecret.PasswordKey, ctx), + } + + envVarSecrets = append(envVarSecrets, dbCDBAdminUserSecret) + } + } + } + + return envVarSecrets +} + +/************************************************* + * Deletions + **************************************************/ +func (r *OrdsSrvsReconciler) ConfigMapDelete(ctx context.Context, req ctrl.Request, ords *dbapi.OrdsSrvs, definedPools map[string]bool) (err error) { + // Delete Undefined Pool ConfigMaps + configMapList := &corev1.ConfigMapList{} + if err := r.List(ctx, configMapList, client.InNamespace(req.Namespace), + client.MatchingLabels(map[string]string{ + controllerLabelKey: controllerLabelVal, + "app.kubernetes.io/instance": ords.Name}), + ); err != nil { + return err + } + + for _, configMap := range configMapList.Items { + if configMap.Name == ords.Name+"-"+globalConfigMapName || configMap.Name == ords.Name+"-init-script" { + continue + } + if _, exists := definedPools[configMap.Name]; !exists { + if err := r.Delete(ctx, &configMap); err != nil { + return err + } + RestartPods = ords.Spec.ForceRestart + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "ConfigMap %s Deleted", configMap.Name) + } + } + + return nil +} + +func (r *OrdsSrvsReconciler) WorkloadDelete(ctx context.Context, req ctrl.Request, ords *dbapi.OrdsSrvs, kind string) (err error) { + logr := log.FromContext(ctx).WithName("WorkloadDelete") + + // Get Workloads + deploymentList := 
&appsv1.DeploymentList{} + if err := r.List(ctx, deploymentList, client.InNamespace(req.Namespace), + client.MatchingLabels(map[string]string{ + controllerLabelKey: controllerLabelVal, + "app.kubernetes.io/instance": ords.Name}), + ); err != nil { + return err + } + + statefulSetList := &appsv1.StatefulSetList{} + if err := r.List(ctx, statefulSetList, client.InNamespace(req.Namespace), + client.MatchingLabels(map[string]string{ + controllerLabelKey: controllerLabelVal, + "app.kubernetes.io/instance": ords.Name}), + ); err != nil { + return err + } + + daemonSetList := &appsv1.DaemonSetList{} + if err := r.List(ctx, daemonSetList, client.InNamespace(req.Namespace), + client.MatchingLabels(map[string]string{ + controllerLabelKey: controllerLabelVal, + "app.kubernetes.io/instance": ords.Name}), + ); err != nil { + return err + } + + switch kind { + case "StatefulSet": + for _, deleteDaemonSet := range daemonSetList.Items { + if err := r.Delete(ctx, &deleteDaemonSet); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + for _, deleteDeployment := range deploymentList.Items { + if err := r.Delete(ctx, &deleteDeployment); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + case "DaemonSet": + for _, deleteDeployment := range deploymentList.Items { + if err := r.Delete(ctx, &deleteDeployment); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + for _, deleteStatefulSet := range statefulSetList.Items { + if err := r.Delete(ctx, &deleteStatefulSet); err != nil { + return err + } + logr.Info("Deleted StatefulSet: " + deleteStatefulSet.Name) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + default: + for _, 
deleteStatefulSet := range statefulSetList.Items { + if err := r.Delete(ctx, &deleteStatefulSet); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + for _, deleteDaemonSet := range daemonSetList.Items { + if err := r.Delete(ctx, &deleteDaemonSet); err != nil { + return err + } + logr.Info("Deleted: " + kind) + r.Recorder.Eventf(ords, corev1.EventTypeNormal, "Delete", "Workload %s Deleted", kind) + } + } + return nil +} + +/************************************************* + * Helpers + **************************************************/ +func getLabels(name string) map[string]string { + return map[string]string{ + "app.kubernetes.io/instance": name, + controllerLabelKey: controllerLabelVal, + } +} + +func generateSpecHash(spec interface{}) string { + byteArray, err := json.Marshal(spec) + if err != nil { + return "" + } + + hash := sha256.New() + _, err = hash.Write(byteArray) + if err != nil { + return "" + } + + hashBytes := hash.Sum(nil) + hashString := hex.EncodeToString(hashBytes[:8]) + + return hashString +} + +func CommonDecryptWithPrivKey(Key string, Buffer string) (string, error) { + + Debug := 0 + block, _ := pem.Decode([]byte(Key)) + pkcs8PrivateKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + fmt.Printf("Failed to parse private key %s \n", err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("======================================\n") + fmt.Printf("%s\n", Key) + fmt.Printf("======================================\n") + } + + encString64, err := base64.StdEncoding.DecodeString(string(Buffer)) + if err != nil { + fmt.Printf("Failed to decode encrypted string to base64: %s\n", err.Error()) + return "", err + } + + if Debug == 1 { + fmt.Printf("======================================\n") + fmt.Printf("%s\n", encString64) + fmt.Printf("======================================\n") + } + + decryptedB, err := 
rsa.DecryptPKCS1v15(nil, pkcs8PrivateKey.(*rsa.PrivateKey), encString64) + if err != nil { + fmt.Printf("Failed to decrypt string %s\n", err.Error()) + return "", err + } + if Debug == 1 { + fmt.Printf("[%s]\n", string(decryptedB)) + } + return strings.TrimSpace(string(decryptedB)), err + +} + +func (r *OrdsSrvsReconciler) CommonDecryptWithPrivKey3(ords *dbapi.OrdsSrvs, sname string, skey string, ctx context.Context) string { + logr := log.FromContext(ctx).WithName("CommonDecryptWithPrivKey2") + secret_par := &corev1.Secret{} + fmt.Printf("sname: %s\n", sname) + fmt.Printf("skey: %s\n", skey) + err := r.Get(ctx, types.NamespacedName{Name: sname, Namespace: ords.Namespace}, secret_par) + if err != nil { + logr.Error(err, "Cannot read secret"+sname) + return "" + } + encVal := string(secret_par.Data[skey]) + encVal = strings.TrimSpace(encVal) + + secret_key := &corev1.Secret{} + /* get private key */ + if err := r.Get(ctx, types.NamespacedName{Name: ords.Spec.EncPrivKey.SecretName, + Namespace: ords.Namespace}, secret_key); err != nil { + logr.Error(err, "Cannot get privte key") + return "" + } + PrvKeyVal := string(secret_key.Data[ords.Spec.EncPrivKey.PasswordKey]) + PrvKeyVal = strings.TrimSpace(PrvKeyVal) + + decVal, err := CommonDecryptWithPrivKey(PrvKeyVal, encVal) + if err != nil { + logr.Error(err, "Fail to decrypt secret") + return "" + } + + logr.Info("Password decryption completed") + + return decVal +} diff --git a/controllers/database/ordssrvs_ordsconfig.go b/controllers/database/ordssrvs_ordsconfig.go new file mode 100644 index 00000000..edb2e0f6 --- /dev/null +++ b/controllers/database/ordssrvs_ordsconfig.go @@ -0,0 +1,258 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" +) + +func (r *OrdsSrvsReconciler) ConfigMapDefine(ctx context.Context, ords *dbapi.OrdsSrvs, configMapName string, poolIndex int) *corev1.ConfigMap { + var defData map[string]string + if configMapName == ords.Name+"-init-script" { + // Read the file from controller's filesystem + filePath := "/ords_init.sh" + scriptData, err := os.ReadFile(filePath) + if err != nil { + return nil + } + defData = map[string]string{ + "init_script.sh": string(scriptData)} + } else if configMapName == ords.Name+"-"+globalConfigMapName { + // GlobalConfigMap + var defStandaloneAccessLog string + if ords.Spec.GlobalSettings.EnableStandaloneAccessLog { + defStandaloneAccessLog = ` ` + ordsSABase + `/log/global` + "\n" + } + var defMongoAccessLog string + if ords.Spec.GlobalSettings.EnableMongoAccessLog { + defMongoAccessLog = ` ` + ordsSABase + `/log/global` + "\n" + } + var defCert string + if ords.Spec.GlobalSettings.CertSecret != nil { + defCert = ` ` + ordsSABase + `/config/certficate/` + ords.Spec.GlobalSettings.CertSecret.Certificate + `` + "\n" + + ` ` + ordsSABase + `/config/certficate/` + ords.Spec.GlobalSettings.CertSecret.CertificateKey + `` + "\n" + } + defData = map[string]string{ + "settings.xml": fmt.Sprintf(`` + "\n" + + `` + "\n" + + `` + "\n" + + conditionalEntry("cache.metadata.graphql.expireAfterAccess", ords.Spec.GlobalSettings.CacheMetadataGraphQLExpireAfterAccess) + + conditionalEntry("cache.metadata.jwks.enabled", ords.Spec.GlobalSettings.CacheMetadataJWKSEnabled) + + conditionalEntry("cache.metadata.jwks.initialCapacity", ords.Spec.GlobalSettings.CacheMetadataJWKSInitialCapacity) + + conditionalEntry("cache.metadata.jwks.maximumSize", ords.Spec.GlobalSettings.CacheMetadataJWKSMaximumSize) 
+ + conditionalEntry("cache.metadata.jwks.expireAfterAccess", ords.Spec.GlobalSettings.CacheMetadataJWKSExpireAfterAccess) + + conditionalEntry("cache.metadata.jwks.expireAfterWrite", ords.Spec.GlobalSettings.CacheMetadataJWKSExpireAfterWrite) + + conditionalEntry("database.api.management.services.disabled", ords.Spec.GlobalSettings.DatabaseAPIManagementServicesDisabled) + + conditionalEntry("db.invalidPoolTimeout", ords.Spec.GlobalSettings.DBInvalidPoolTimeout) + + conditionalEntry("feature.graphql.max.nesting.depth", ords.Spec.GlobalSettings.FeatureGraphQLMaxNestingDepth) + + conditionalEntry("request.traceHeaderName", ords.Spec.GlobalSettings.RequestTraceHeaderName) + + conditionalEntry("security.credentials.attempts", ords.Spec.GlobalSettings.SecurityCredentialsAttempts) + + conditionalEntry("security.credentials.lock.time", ords.Spec.GlobalSettings.SecurityCredentialsLockTime) + + conditionalEntry("standalone.context.path", ords.Spec.GlobalSettings.StandaloneContextPath) + + conditionalEntry("standalone.http.port", ords.Spec.GlobalSettings.StandaloneHTTPPort) + + conditionalEntry("standalone.https.host", ords.Spec.GlobalSettings.StandaloneHTTPSHost) + + conditionalEntry("standalone.https.port", ords.Spec.GlobalSettings.StandaloneHTTPSPort) + + conditionalEntry("standalone.stop.timeout", ords.Spec.GlobalSettings.StandaloneStopTimeout) + + conditionalEntry("cache.metadata.timeout", ords.Spec.GlobalSettings.CacheMetadataTimeout) + + conditionalEntry("cache.metadata.enabled", ords.Spec.GlobalSettings.CacheMetadataEnabled) + + conditionalEntry("database.api.enabled", ords.Spec.GlobalSettings.DatabaseAPIEnabled) + + conditionalEntry("debug.printDebugToScreen", ords.Spec.GlobalSettings.DebugPrintDebugToScreen) + + conditionalEntry("error.responseFormat", ords.Spec.GlobalSettings.ErrorResponseFormat) + + conditionalEntry("icap.port", ords.Spec.GlobalSettings.ICAPPort) + + conditionalEntry("icap.secure.port", ords.Spec.GlobalSettings.ICAPSecurePort) + + 
conditionalEntry("icap.server", ords.Spec.GlobalSettings.ICAPServer) + + conditionalEntry("log.procedure", ords.Spec.GlobalSettings.LogProcedure) + + conditionalEntry("mongo.enabled", ords.Spec.GlobalSettings.MongoEnabled) + + conditionalEntry("mongo.port", ords.Spec.GlobalSettings.MongoPort) + + conditionalEntry("mongo.idle.timeout", ords.Spec.GlobalSettings.MongoIdleTimeout) + + conditionalEntry("mongo.op.timeout", ords.Spec.GlobalSettings.MongoOpTimeout) + + conditionalEntry("security.disableDefaultExclusionList", ords.Spec.GlobalSettings.SecurityDisableDefaultExclusionList) + + conditionalEntry("security.exclusionList", ords.Spec.GlobalSettings.SecurityExclusionList) + + conditionalEntry("security.inclusionList", ords.Spec.GlobalSettings.SecurityInclusionList) + + conditionalEntry("security.maxEntries", ords.Spec.GlobalSettings.SecurityMaxEntries) + + conditionalEntry("security.verifySSL", ords.Spec.GlobalSettings.SecurityVerifySSL) + + conditionalEntry("security.httpsHeaderCheck", ords.Spec.GlobalSettings.SecurityHTTPSHeaderCheck) + + conditionalEntry("security.forceHTTPS", ords.Spec.GlobalSettings.SecurityForceHTTPS) + + conditionalEntry("externalSessionTrustedOrigins", ords.Spec.GlobalSettings.SecuirtyExternalSessionTrustedOrigins) + + ` ` + ordsSABase + `/config/global/doc_root/` + "\n" + + // Dynamic + defStandaloneAccessLog + + defMongoAccessLog + + defCert + + // Disabled (but not forgotten) + // conditionalEntry("standalone.binds", ords.Spec.GlobalSettings.StandaloneBinds) + + // conditionalEntry("error.externalPath", ords.Spec.GlobalSettings.ErrorExternalPath) + + // conditionalEntry("security.credentials.file ", ords.Spec.GlobalSettings.SecurityCredentialsFile) + + // conditionalEntry("standalone.static.path", ords.Spec.GlobalSettings.StandaloneStaticPath) + + // conditionalEntry("standalone.doc.root", ords.Spec.GlobalSettings.StandaloneDocRoot) + + // conditionalEntry("standalone.static.context.path", 
ords.Spec.GlobalSettings.StandaloneStaticContextPath) + + ``), + "logging.properties": fmt.Sprintf(`handlers=java.util.logging.FileHandler` + "\n" + + `.level=SEVERE` + "\n" + + `java.util.logging.FileHandler.level=ALL` + "\n" + + `oracle.dbtools.level=FINEST` + "\n" + + `java.util.logging.FileHandler.pattern = ` + ordsSABase + `/log/global/debug.log` + "\n" + + `java.util.logging.FileHandler.formatter = java.util.logging.SimpleFormatter`), + } + } else { + // PoolConfigMap + poolName := strings.ToLower(ords.Spec.PoolSettings[poolIndex].PoolName) + var defDBNetworkPath string + if ords.Spec.PoolSettings[poolIndex].DBWalletSecret != nil { + defDBNetworkPath = ` ` + ordsSABase + `/config/databases/` + poolName + `/network/admin/` + ords.Spec.PoolSettings[poolIndex].DBWalletSecret.WalletName + `` + "\n" + + conditionalEntry("db.wallet.zip.service", strings.ToUpper(ords.Spec.PoolSettings[poolIndex].DBWalletZipService)) + "\n" + } else { + defDBNetworkPath = ` ` + ordsSABase + `/config/databases/` + poolName + `/network/admin/` + "\n" + } + defData = map[string]string{ + "pool.xml": fmt.Sprintf(`` + "\n" + + `` + "\n" + + `` + "\n" + + ` ` + ords.Spec.PoolSettings[poolIndex].DBUsername + `` + "\n" + + conditionalEntry("db.adminUser", ords.Spec.PoolSettings[poolIndex].DBAdminUser) + + conditionalEntry("db.cdb.adminUser", ords.Spec.PoolSettings[poolIndex].DBCDBAdminUser) + + conditionalEntry("apex.security.administrator.roles", ords.Spec.PoolSettings[poolIndex].ApexSecurityAdministratorRoles) + + conditionalEntry("apex.security.user.roles", ords.Spec.PoolSettings[poolIndex].ApexSecurityUserRoles) + + conditionalEntry("db.credentialsSource", ords.Spec.PoolSettings[poolIndex].DBCredentialsSource) + + conditionalEntry("db.poolDestroyTimeout", ords.Spec.PoolSettings[poolIndex].DBPoolDestroyTimeout) + + conditionalEntry("debug.trackResources", ords.Spec.PoolSettings[poolIndex].DebugTrackResources) + + conditionalEntry("feature.openservicebroker.exclude", 
ords.Spec.PoolSettings[poolIndex].FeatureOpenservicebrokerExclude) + + conditionalEntry("feature.sdw", ords.Spec.PoolSettings[poolIndex].FeatureSDW) + + conditionalEntry("http.cookie.filter", ords.Spec.PoolSettings[poolIndex].HttpCookieFilter) + + conditionalEntry("jdbc.auth.admin.role", ords.Spec.PoolSettings[poolIndex].JDBCAuthAdminRole) + + conditionalEntry("jdbc.cleanup.mode", ords.Spec.PoolSettings[poolIndex].JDBCCleanupMode) + + conditionalEntry("owa.trace.sql", ords.Spec.PoolSettings[poolIndex].OwaTraceSql) + + conditionalEntry("plsql.gateway.mode", ords.Spec.PoolSettings[poolIndex].PlsqlGatewayMode) + + conditionalEntry("security.jwt.profile.enabled", ords.Spec.PoolSettings[poolIndex].SecurityJWTProfileEnabled) + + conditionalEntry("security.jwks.size", ords.Spec.PoolSettings[poolIndex].SecurityJWKSSize) + + conditionalEntry("security.jwks.connection.timeout", ords.Spec.PoolSettings[poolIndex].SecurityJWKSConnectionTimeout) + + conditionalEntry("security.jwks.read.timeout", ords.Spec.PoolSettings[poolIndex].SecurityJWKSReadTimeout) + + conditionalEntry("security.jwks.refresh.interval", ords.Spec.PoolSettings[poolIndex].SecurityJWKSRefreshInterval) + + conditionalEntry("security.jwt.allowed.skew", ords.Spec.PoolSettings[poolIndex].SecurityJWTAllowedSkew) + + conditionalEntry("security.jwt.allowed.age", ords.Spec.PoolSettings[poolIndex].SecurityJWTAllowedAge) + + conditionalEntry("db.connectionType", ords.Spec.PoolSettings[poolIndex].DBConnectionType) + + conditionalEntry("db.customURL", ords.Spec.PoolSettings[poolIndex].DBCustomURL) + + conditionalEntry("db.hostname", ords.Spec.PoolSettings[poolIndex].DBHostname) + + conditionalEntry("db.port", ords.Spec.PoolSettings[poolIndex].DBPort) + + conditionalEntry("db.servicename", ords.Spec.PoolSettings[poolIndex].DBServicename) + + conditionalEntry("db.sid", ords.Spec.PoolSettings[poolIndex].DBSid) + + conditionalEntry("db.tnsAliasName", ords.Spec.PoolSettings[poolIndex].DBTnsAliasName) + + 
conditionalEntry("jdbc.DriverType", ords.Spec.PoolSettings[poolIndex].JDBCDriverType) + + conditionalEntry("jdbc.InactivityTimeout", ords.Spec.PoolSettings[poolIndex].JDBCInactivityTimeout) + + conditionalEntry("jdbc.InitialLimit", ords.Spec.PoolSettings[poolIndex].JDBCInitialLimit) + + conditionalEntry("jdbc.MaxConnectionReuseCount", ords.Spec.PoolSettings[poolIndex].JDBCMaxConnectionReuseCount) + + conditionalEntry("jdbc.MaxLimit", ords.Spec.PoolSettings[poolIndex].JDBCMaxLimit) + + conditionalEntry("jdbc.auth.enabled", ords.Spec.PoolSettings[poolIndex].JDBCAuthEnabled) + + conditionalEntry("jdbc.MaxStatementsLimit", ords.Spec.PoolSettings[poolIndex].JDBCMaxStatementsLimit) + + conditionalEntry("jdbc.MinLimit", ords.Spec.PoolSettings[poolIndex].JDBCMinLimit) + + conditionalEntry("jdbc.statementTimeout", ords.Spec.PoolSettings[poolIndex].JDBCStatementTimeout) + + conditionalEntry("jdbc.MaxConnectionReuseTime", ords.Spec.PoolSettings[poolIndex].JDBCMaxConnectionReuseTime) + + conditionalEntry("jdbc.SecondsToTrustIdleConnection", ords.Spec.PoolSettings[poolIndex].JDBCSecondsToTrustIdleConnection) + + conditionalEntry("misc.defaultPage", ords.Spec.PoolSettings[poolIndex].MiscDefaultPage) + + conditionalEntry("misc.pagination.maxRows", ords.Spec.PoolSettings[poolIndex].MiscPaginationMaxRows) + + conditionalEntry("procedure.postProcess", ords.Spec.PoolSettings[poolIndex].ProcedurePostProcess) + + conditionalEntry("procedure.preProcess", ords.Spec.PoolSettings[poolIndex].ProcedurePreProcess) + + conditionalEntry("procedure.rest.preHook", ords.Spec.PoolSettings[poolIndex].ProcedureRestPreHook) + + conditionalEntry("security.requestAuthenticationFunction", ords.Spec.PoolSettings[poolIndex].SecurityRequestAuthenticationFunction) + + conditionalEntry("security.requestValidationFunction", ords.Spec.PoolSettings[poolIndex].SecurityRequestValidationFunction) + + conditionalEntry("soda.defaultLimit", ords.Spec.PoolSettings[poolIndex].SODADefaultLimit) + + 
conditionalEntry("soda.maxLimit", ords.Spec.PoolSettings[poolIndex].SODAMaxLimit) + + conditionalEntry("restEnabledSql.active", ords.Spec.PoolSettings[poolIndex].RestEnabledSqlActive) + + defDBNetworkPath + + // Disabled (but not forgotten) + // conditionalEntry("autoupgrade.api.aulocation", ords.Spec.PoolSettings[poolIndex].AutoupgradeAPIAulocation) + + // conditionalEntry("autoupgrade.api.enabled", ords.Spec.PoolSettings[poolIndex].AutoupgradeAPIEnabled) + + // conditionalEntry("autoupgrade.api.jvmlocation", ords.Spec.PoolSettings[poolIndex].AutoupgradeAPIJvmlocation) + + // conditionalEntry("autoupgrade.api.loglocation", ords.Spec.PoolSettings[poolIndex].AutoupgradeAPILoglocation) + + // conditionalEntry("db.serviceNameSuffix", ords.Spec.PoolSettings[poolIndex].DBServiceNameSuffix) + + ``), + } + } + + objectMeta := objectMetaDefine(ords, configMapName) + def := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: objectMeta, + Data: defData, + } + + // Set the ownerRef + if err := ctrl.SetControllerReference(ords, def, r.Scheme); err != nil { + return nil + } + return def +} + +func conditionalEntry(key string, value interface{}) string { + switch v := value.(type) { + case nil: + return "" + case string: + if v != "" { + return fmt.Sprintf(` %s`+"\n", key, v) + } + case *int32: + if v != nil { + return fmt.Sprintf(` %d`+"\n", key, *v) + } + case *bool: + if v != nil { + return fmt.Sprintf(` %v`+"\n", key, *v) + } + case *time.Duration: + if v != nil { + return fmt.Sprintf(` %v`+"\n", key, *v) + } + default: + return fmt.Sprintf(` %v`+"\n", key, v) + } + return "" +} diff --git a/controllers/database/pdb_controller.go b/controllers/database/pdb_controller.go index f0b4fd46..a2ca0f85 100644 --- a/controllers/database/pdb_controller.go +++ b/controllers/database/pdb_controller.go @@ -55,7 +55,8 @@ import ( "strings" "time" - dbapi "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbapi 
"github.com/oracle/oracle-database-operator/apis/database/v4" + lrcommons "github.com/oracle/oracle-database-operator/commons/multitenant/lrest" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -82,6 +83,11 @@ type PDBReconciler struct { Recorder record.EventRecorder } +type controllers struct { + Pdbc PDBReconciler + Cdbc CDBReconciler +} + type RESTSQLCollection struct { Env struct { DefaultTimeZone string `json:"defaultTimeZone,omitempty"` @@ -426,7 +432,8 @@ func (r *PDBReconciler) getSecret(ctx context.Context, req ctrl.Request, pdb *db /* ************************************************ - Issue a REST API Call to the ORDS container - /*********************************************** + +*********************************************** */ func (r *PDBReconciler) callAPI(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB, url string, payload map[string]string, action string) (string, error) { log := r.Log.WithValues("callAPI", req.NamespacedName) @@ -472,11 +479,6 @@ func (r *PDBReconciler) callAPI(ctx context.Context, req ctrl.Request, pdb *dbap } caCert := secret.Data[pdb.Spec.PDBTlsCat.Secret.Key] - /* - r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSINFO", string(rsaKeyPEM)) - r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSINFO", string(rsaCertPEM)) - r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSINFO", string(caCert)) - */ certificate, err := tls.X509KeyPair([]byte(rsaCertPEM), []byte(rsaKeyPEM)) if err != nil { @@ -620,7 +622,8 @@ func (r *PDBReconciler) callAPI(ctx context.Context, req ctrl.Request, pdb *dbap /* ************************************************ - Create a PDB - /*********************************************** + +*********************************************** */ func (r *PDBReconciler) createPDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { @@ -635,15 +638,52 @@ func (r *PDBReconciler) createPDB(ctx context.Context, req ctrl.Request, pdb *db return err } - pdbAdminName, err := r.getSecret(ctx, 
req, pdb, pdb.Spec.AdminName.Secret.SecretName, pdb.Spec.AdminName.Secret.Key) + /*** BEGIN GET ENCPASS ***/ + secret := &corev1.Secret{} + + err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.AdminName.Secret.SecretName, Namespace: pdb.Namespace}, secret) if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.AdminName.Secret.SecretName) + return err + } + log.Error(err, "Unable to get the secret.") + return err + } + pdbAdminNameEnc := string(secret.Data[pdb.Spec.AdminName.Secret.Key]) + pdbAdminNameEnc = strings.TrimSpace(pdbAdminNameEnc) + + err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBPriKey.Secret.SecretName, Namespace: pdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBPriKey.Secret.SecretName) + return err + } + log.Error(err, "Unable to get the secret.") return err } - pdbAdminPwd, err := r.getSecret(ctx, req, pdb, pdb.Spec.AdminPwd.Secret.SecretName, pdb.Spec.AdminPwd.Secret.Key) + privKey := string(secret.Data[pdb.Spec.PDBPriKey.Secret.Key]) + pdbAdminName, err := lrcommons.CommonDecryptWithPrivKey(privKey, pdbAdminNameEnc, req) + + // Get Web Server User Password + secret = &corev1.Secret{} + err = r.Get(ctx, types.NamespacedName{Name: pdb.Spec.AdminPwd.Secret.SecretName, Namespace: pdb.Namespace}, secret) if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.AdminPwd.Secret.SecretName) + return err + } + log.Error(err, "Unable to get the secret.") return err } + pdbAdminPwdEnc := string(secret.Data[pdb.Spec.AdminPwd.Secret.Key]) + pdbAdminPwdEnc = strings.TrimSpace(pdbAdminPwdEnc) + pdbAdminPwd, err := lrcommons.CommonDecryptWithPrivKey(privKey, pdbAdminPwdEnc, req) + pdbAdminName = strings.TrimSuffix(pdbAdminName, "\n") + pdbAdminPwd = strings.TrimSuffix(pdbAdminPwd, "\n") + /*** END GET ENCPASS ***/ + log.Info("====================> " + pdbAdminName + ":" + pdbAdminPwd) /* Prevent creating an 
existing pdb */ err = r.getPDBState(ctx, req, pdb) if err != nil { @@ -656,9 +696,6 @@ func (r *PDBReconciler) createPDB(ctx context.Context, req ctrl.Request, pdb *db return nil } - pdbAdminName = strings.TrimSuffix(pdbAdminName, "\n") - pdbAdminPwd = strings.TrimSuffix(pdbAdminPwd, "\n") - values := map[string]string{ "method": "CREATE", "pdb_name": pdb.Spec.PDBName, @@ -696,7 +733,7 @@ func (r *PDBReconciler) createPDB(ctx context.Context, req ctrl.Request, pdb *db if err := r.Status().Update(ctx, pdb); err != nil { log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) } - _, err = r.callAPI(ctx, req, pdb, url, values, "POST") + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") if err != nil { log.Error(err, "callAPI error", "err", err.Error()) return err @@ -724,7 +761,8 @@ func (r *PDBReconciler) createPDB(ctx context.Context, req ctrl.Request, pdb *db /* ************************************************ - Clone a PDB - /*********************************************** + +*********************************************** */ func (r *PDBReconciler) clonePDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { @@ -773,8 +811,6 @@ func (r *PDBReconciler) clonePDB(ctx context.Context, req ctrl.Request, pdb *dba values["tempSize"] = pdb.Spec.TempSize } - //url := "https://"+ pdb.Spec.CDBNamespace + "." + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdb.Spec.SrcPDBName + "/" - //url := "https://" + pdb.Spec.CDBResName + "-ords:" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdb.Spec.SrcPDBName + "/" url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdb.Spec.SrcPDBName + "/" pdb.Status.Phase = pdbPhaseClone @@ -782,7 +818,7 @@ func (r *PDBReconciler) clonePDB(ctx context.Context, req ctrl.Request, pdb *dba if err := r.Status().Update(ctx, pdb); err != nil { log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) } - _, err = r.callAPI(ctx, req, pdb, url, values, "POST") + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") if err != nil { return err } @@ -809,7 +845,8 @@ func (r *PDBReconciler) clonePDB(ctx context.Context, req ctrl.Request, pdb *dba /* ************************************************ - Plug a PDB - /*********************************************** + +*********************************************** */ func (r *PDBReconciler) plugPDB(ctx context.Context, req ctrl.Request, pdb *dbapi.PDB) error { @@ -868,7 +905,7 @@ func (r *PDBReconciler) plugPDB(ctx context.Context, req ctrl.Request, pdb *dbap if err := r.Status().Update(ctx, pdb); err != nil { log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) } - _, err = r.callAPI(ctx, req, pdb, url, values, "POST") + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") if err != nil { return err } @@ -879,6 +916,7 @@ func (r *PDBReconciler) plugPDB(ctx context.Context, req ctrl.Request, pdb *dbap pdb.Status.ConnString = cdb.Spec.DBServer + ":" + strconv.Itoa(cdb.Spec.DBPort) + "/" + pdb.Spec.PDBName } else { pdb.Status.ConnString = cdb.Spec.DBTnsurl + ParseTnsAlias(&(pdb.Status.ConnString), &(pdb.Spec.PDBName)) } assertivePdbDeletion = pdb.Spec.AssertivePdbDeletion @@ -894,7 +932,8 @@ func (r *PDBReconciler) plugPDB(ctx context.Context, req ctrl.Request, pdb *dbap /* ************************************************ - Unplug a PDB - /*********************************************** + +*********************************************** */ func (r *PDBReconciler) unplugPDB(ctx context.Context, req 
ctrl.Request, pdb *dbapi.PDB) error { @@ -950,7 +989,7 @@ func (r *PDBReconciler) unplugPDB(ctx context.Context, req ctrl.Request, pdb *db if err := r.Status().Update(ctx, pdb); err != nil { log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) } - _, err = r.callAPI(ctx, req, pdb, url, values, "POST") + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") if err != nil { return err } @@ -1036,7 +1075,7 @@ func (r *PDBReconciler) modifyPDB(ctx context.Context, req ctrl.Request, pdb *db if err := r.Status().Update(ctx, pdb); err != nil { log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) } - _, err = r.callAPI(ctx, req, pdb, url, values, "POST") + _, err = NewCallApi(r, ctx, req, pdb, url, values, "POST") if err != nil { return err } @@ -1078,7 +1117,7 @@ func (r *PDBReconciler) getPDBState(ctx context.Context, req ctrl.Request, pdb * log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) } - respData, err := r.callAPI(ctx, req, pdb, url, nil, "GET") + respData, err := NewCallApi(r, ctx, req, pdb, url, nil, "GET") if err != nil { pdb.Status.OpenMode = "UNKNOWN" @@ -1129,7 +1168,7 @@ func (r *PDBReconciler) mapPDB(ctx context.Context, req ctrl.Request, pdb *dbapi log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) } - respData, err := r.callAPI(ctx, req, pdb, url, nil, "GET") + respData, err := NewCallApi(r, ctx, req, pdb, url, nil, "GET") if err != nil { pdb.Status.OpenMode = "UNKNOWN" @@ -1227,6 +1266,7 @@ func (r *PDBReconciler) managePDBDeletion2(ctx context.Context, req ctrl.Request return err } + var errclose error pdbName := pdb.Spec.PDBName if pdb.Status.OpenMode == "READ WRITE" { valuesclose := map[string]string{ @@ -1234,23 +1274,26 @@ func (r *PDBReconciler) managePDBDeletion2(ctx context.Context, req ctrl.Request "modifyOption": "IMMEDIATE", "getScript": "FALSE"} url := "https://" + pdb.Spec.CDBResName + "-ords." 
+ pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/status" - _, errclose := r.callAPI(ctx, req, pdb, url, valuesclose, "POST") + _, errclose = NewCallApi(r, ctx, req, pdb, url, valuesclose, "POST") if errclose != nil { log.Info("Warning error closing pdb continue anyway") } } - valuesdrop := map[string]string{ - "action": "INCLUDING", - "getScript": "FALSE"} - url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/" - - log.Info("Call Delete()") - _, errdelete := r.callAPI(ctx, req, pdb, url, valuesdrop, "DELETE") - if errdelete != nil { - log.Error(errdelete, "Fail to delete pdb :"+pdb.Name, "err", err.Error()) - return errdelete + if errclose == nil { + valuesdrop := map[string]string{ + "action": "INCLUDING", + "getScript": "FALSE"} + url := "https://" + pdb.Spec.CDBResName + "-ords." + pdb.Spec.CDBNamespace + ":" + strconv.Itoa(cdb.Spec.ORDSPort) + "/ords/_/db-api/latest/database/pdbs/" + pdbName + "/" + + log.Info("Call Delete()") + _, errdelete := NewCallApi(r, ctx, req, pdb, url, valuesdrop, "DELETE") + if errdelete != nil { + log.Error(errdelete, "Fail to delete pdb :"+pdb.Name, "err", errdelete.Error()) + return errdelete + } } + } /* END OF ASSERTIVE SECTION */ log.Info("Marked to be deleted") @@ -1304,7 +1347,7 @@ func (r *PDBReconciler) deletePDBInstance(req ctrl.Request, ctx context.Context, if err := r.Status().Update(ctx, pdb); err != nil { log.Error(err, "Failed to update status for :"+pdb.Name, "err", err.Error()) } - _, err = r.callAPI(ctx, req, pdb, url, values, "DELETE") + _, err = NewCallApi(r, ctx, req, pdb, url, values, "DELETE") if err != nil { pdb.Status.ConnString = "" return err @@ -1363,3 +1406,226 @@ func ParseTnsAlias(tns *string, pdbsrv *string) { fmt.Printf("Newstring [%s]\n", *tns) } + +func NewCallApi(intr interface{}, ctx context.Context, req 
ctrl.Request, pdb *dbapi.PDB, url string, payload map[string]string, action string) (string, error) { + + var c client.Client + var r logr.Logger + var e record.EventRecorder + var err error + + recpdb, ok1 := intr.(*PDBReconciler) + if ok1 { + fmt.Printf("func NewCallApi ((*PDBReconciler),......)\n") + c = recpdb.Client + e = recpdb.Recorder + r = recpdb.Log + } + + reccdb, ok2 := intr.(*CDBReconciler) + if ok2 { + fmt.Printf("func NewCallApi ((*CDBReconciler),......)\n") + c = reccdb.Client + e = reccdb.Recorder + r = reccdb.Log + } + + secret := &corev1.Secret{} + + log := r.WithValues("NewCallApi", req.NamespacedName) + log.Info("Call c.Get") + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBTlsKey.Secret.SecretName, Namespace: pdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBTlsKey.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + rsaKeyPEM := secret.Data[pdb.Spec.PDBTlsKey.Secret.Key] + + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBTlsCrt.Secret.SecretName, Namespace: pdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBTlsCrt.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + rsaCertPEM := secret.Data[pdb.Spec.PDBTlsCrt.Secret.Key] + + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBTlsCat.Secret.SecretName, Namespace: pdb.Namespace}, secret) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBTlsCat.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + + caCert := secret.Data[pdb.Spec.PDBTlsCat.Secret.Key] + /* + r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSINFO", string(rsaKeyPEM)) + r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSINFO", string(rsaCertPEM)) + 
r.Recorder.Eventf(pdb, corev1.EventTypeWarning, "ORDSINFO", string(caCert)) + */ + + certificate, err := tls.X509KeyPair([]byte(rsaCertPEM), []byte(rsaKeyPEM)) + if err != nil { + pdb.Status.Msg = "Error tls.X509KeyPair" + return "", err + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + tlsConf := &tls.Config{Certificates: []tls.Certificate{certificate}, RootCAs: caCertPool} + + tr := &http.Transport{TLSClientConfig: tlsConf} + + httpclient := &http.Client{Transport: tr} + + log.Info("Issuing REST call", "URL", url, "Action", action) + + /* + cdb, err := r.getCDBResource(ctx, req, pdb) + if err != nil { + return "", err + } + */ + + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.WebServerUsr.Secret.SecretName, Namespace: pdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.WebServerUsr.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + webUserEnc := string(secret.Data[pdb.Spec.WebServerUsr.Secret.Key]) + webUserEnc = strings.TrimSpace(webUserEnc) + + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.PDBPriKey.Secret.SecretName, Namespace: pdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.PDBPriKey.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + privKey := string(secret.Data[pdb.Spec.PDBPriKey.Secret.Key]) + webUser, err := lrcommons.CommonDecryptWithPrivKey(privKey, webUserEnc, req) + + // Get Web Server User Password + secret = &corev1.Secret{} + err = c.Get(ctx, types.NamespacedName{Name: pdb.Spec.WebServerPwd.Secret.SecretName, Namespace: pdb.Namespace}, secret) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("Secret not found:" + pdb.Spec.WebServerPwd.Secret.SecretName) + return "", err + } + log.Error(err, "Unable to get the secret.") + return "", err + } + 
webUserPwdEnc := string(secret.Data[pdb.Spec.WebServerPwd.Secret.Key]) + webUserPwdEnc = strings.TrimSpace(webUserPwdEnc) + webUserPwd, err := lrcommons.CommonDecryptWithPrivKey(privKey, webUserPwdEnc, req) + /////////////////////////////////////////////////////////////////////////////////// + + var httpreq *http.Request + if action == "GET" { + httpreq, err = http.NewRequest(action, url, nil) + } else { + jsonValue, _ := json.Marshal(payload) + httpreq, err = http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + } + + if err != nil { + log.Info("Unable to create HTTP Request for PDB : "+pdb.Name, "err", err.Error()) + return "", err + } + + httpreq.Header.Add("Accept", "application/json") + httpreq.Header.Add("Content-Type", "application/json") + httpreq.SetBasicAuth(webUser, webUserPwd) + + resp, err := httpclient.Do(httpreq) + if err != nil { + errmsg := err.Error() + log.Error(err, "Failed - Could not connect to ORDS Pod", "err", err.Error()) + pdb.Status.Msg = "Error: Could not connect to ORDS Pod" + e.Eventf(pdb, corev1.EventTypeWarning, "ORDSError", errmsg) + return "", err + } + + e.Eventf(pdb, corev1.EventTypeWarning, "Done", pdb.Spec.CDBResName) + if resp.StatusCode != http.StatusOK { + bb, _ := ioutil.ReadAll(resp.Body) + + if resp.StatusCode == 404 { + pdb.Status.ConnString = "" + pdb.Status.Msg = pdb.Spec.PDBName + " not found" + + } else { + if floodcontrol == false { + pdb.Status.Msg = "ORDS Error - HTTP Status Code:" + strconv.Itoa(resp.StatusCode) + } + } + + if floodcontrol == false { + log.Info("ORDS Error - HTTP Status Code :"+strconv.Itoa(resp.StatusCode), "Err", string(bb)) + } + + var apiErr ORDSError + json.Unmarshal([]byte(bb), &apiErr) + if floodcontrol == false { + e.Eventf(pdb, corev1.EventTypeWarning, "ORDSError", "Failed: %s", apiErr.Message) + } + //fmt.Printf("%+v", apiErr) + //fmt.Println(string(bb)) + floodcontrol = true + return "", errors.New("ORDS Error") + } + floodcontrol = false + + defer resp.Body.Close() + + bodyBytes, 
err := ioutil.ReadAll(resp.Body) + if err != nil { + fmt.Print(err.Error()) + } + respData := string(bodyBytes) + //fmt.Println(string(bodyBytes)) + + var apiResponse RESTSQLCollection + json.Unmarshal([]byte(bodyBytes), &apiResponse) + //fmt.Printf("%#v", apiResponse) + //fmt.Printf("%+v", apiResponse) + + errFound := false + for _, sqlItem := range apiResponse.Items { + if sqlItem.ErrorDetails != "" { + log.Info("ORDS Error - Oracle Error Code :" + strconv.Itoa(sqlItem.ErrorCode)) + if !errFound { + pdb.Status.Msg = sqlItem.ErrorDetails + } + e.Eventf(pdb, corev1.EventTypeWarning, "OraError", "%s", sqlItem.ErrorDetails) + errFound = true + } + } + + if errFound { + return "", errors.New("Oracle Error") + } + + return respData, nil +} diff --git a/controllers/database/shardingdatabase_controller.go b/controllers/database/shardingdatabase_controller.go index 7fcaac2b..1ec77253 100644 --- a/controllers/database/shardingdatabase_controller.go +++ b/controllers/database/shardingdatabase_controller.go @@ -67,18 +67,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" shardingv1 "github.com/oracle/oracle-database-operator/commons/sharding" ) -// Sharding Topology -type ShardingTopology struct { - topicid string - Instance *databasev1alpha1.ShardingDatabase - deltopology bool - onsProvider common.ConfigurationProvider - onsProviderFlag bool - rclient ons.NotificationDataPlaneClient +// Struct keeping Oracle Notification Server Info +type OnsStatus struct { + Topicid string `json:"topicid,omitempty"` + Instance *databasev4.ShardingDatabase `json:"instance,omitempty"` + OnsProvider common.ConfigurationProvider `json:"onsProvider,omitempty"` + OnsProviderFlag bool `json:"onsProviderFlag,omitempty"` + Rclient ons.NotificationDataPlaneClient 
`json:"rclient,omitempty"` } // ShardingDatabaseReconciler reconciles a ShardingDatabase object @@ -89,7 +88,6 @@ type ShardingDatabaseReconciler struct { kubeClient kubernetes.Interface kubeConfig clientcmd.ClientConfig Recorder record.EventRecorder - osh []*ShardingTopology InCluster bool Namespace string } @@ -97,6 +95,8 @@ type ShardingDatabaseReconciler struct { var sentFailMsg = make(map[string]bool) var sentCompleteMsg = make(map[string]bool) +var oshMap = make(map[string]*OnsStatus) + // +kubebuilder:rbac:groups=database.oracle.com,resources=shardingdatabases,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=database.oracle.com,resources=shardingdatabases/status,verbs=get;update;patch // +kubebuilder:rbac:groups=database.oracle.com,resources=shardingdatabases/finalizers,verbs=get;create;update;patch;delete @@ -120,19 +120,19 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // your logic here var i int32 - //var ShardImageLatest []databasev1alpha1.ShardSpec - var OraCatalogSpex databasev1alpha1.CatalogSpec - var OraShardSpex databasev1alpha1.ShardSpec - var OraGsmSpex databasev1alpha1.GsmSpec + //var ShardImageLatest []databasev4.ShardSpec + var OraCatalogSpex databasev4.CatalogSpec + var OraShardSpex databasev4.ShardSpec + var OraGsmSpex databasev4.GsmSpec var result ctrl.Result var isShardTopologyDeleteTrue bool = false //var msg string var err error - var idx int var stateType string resultNq := ctrl.Result{Requeue: false} resultQ := ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second} var nilErr error = nil + var msg string // On every reconcile, we will call setCrdLifeCycleState // To understand this, please refer https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/ @@ -146,7 +146,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req } } // Fetch the ProvShard instance - instance := &databasev1alpha1.ShardingDatabase{} + 
instance := &databasev4.ShardingDatabase{} err = r.Client.Get(context.TODO(), req.NamespacedName, instance) if err != nil { if errors.IsNotFound(err) { @@ -159,14 +159,10 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, err } - _, instFlag := r.checkProvInstance(instance) - // assinging osh instance + instFlag := r.checkProvInstance(instance) if !instFlag { - // Sharding Topolgy Struct Assignment - // ====================================== - osh := &ShardingTopology{} - osh.Instance = instance - r.osh = append(r.osh, osh) + oshMap[instance.Name] = &OnsStatus{} + oshMap[instance.Name].Instance = instance } defer r.setCrdLifeCycleState(instance, &result, &err, &stateType) defer r.updateShardTopologyStatus(instance) @@ -187,30 +183,20 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req } // ======== Setting the flag and Index to be used later in this function ======== - idx, instFlag = r.checkProvInstance(instance) - if !instFlag { - //r.setCrdLifeCycleState(instance, &result, &err, stateType) - result = resultNq - return result, fmt.Errorf("DId not find the instance in checkProvInstance") - } + // instFlag = r.checkProvInstance(instance) + // if !instFlag { + //r.setCrdLifeCycleState(instance, &result, &err, stateType) + //// result = resultNq + // return result, fmt.Errorf("DId not find the instance in checkProvInstance") + // } // ================================ OCI Notification Provider =========== - r.getOnsConfigProvider(instance, idx) + r.getOnsConfigProvider(instance) // =============================== Checking Namespace ============== - if instance.Spec.Namespace == "" { - ///err = shardingv1.AddNamespace(instance, r.Client, r.Log) - //if err != nil { - // //r.setCrdLifeCycleState(instance, &result, &err, stateType) - // result = resultNq - // return result, err - // } - // } else { - instance.Spec.Namespace = "default" - } // ======================== Validate Specs 
============== - err = r.validateSpex(instance, idx) + err = r.validateSpex(instance) if err != nil { //r.setCrdLifeCycleState(instance, &result, &err, stateType) result = resultNq @@ -239,6 +225,12 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req if len(instance.Spec.Catalog) > 0 { for i = 0; i < int32(len(instance.Spec.Catalog)); i++ { OraCatalogSpex = instance.Spec.Catalog[i] + if len(OraCatalogSpex.Name) > 9 { + msg = "Catalog Name cannot be greater than 9 characters." + err = fmt.Errorf(msg) + result = resultNq + return result, err + } // See if StatefulSets already exists and create if it doesn't result, err = r.deployStatefulSet(instance, shardingv1.BuildStatefulSetForCatalog(instance, OraCatalogSpex), "CATALOG") if err != nil { @@ -290,6 +282,12 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // if user set replicasize greater than 1 but also set instance.Spec.OraDbPvcName then only one service will be created and one pod for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex = instance.Spec.Shard[i] + if len(OraShardSpex.Name) > 9 { + msg = "Shard Name cannot be greater than 9 characters." + err = fmt.Errorf(msg) + result = resultNq + return result, err + } if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { result, err = r.createService(instance, shardingv1.BuildServiceDefForShard(instance, 0, OraShardSpex, "local")) if err != nil { @@ -342,7 +340,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // Loop will be requeued only if Shard Statefulset is not ready or not configured. 
// Till that time Reconcilation loop will remain in blocked state // if the err is return because of Shard is not ready then blocked state is rmeoved and reconcilation state is set - err = r.addPrimaryShards(instance, idx) + err = r.addPrimaryShards(instance) if err != nil { // time.Sleep(30 * time.Second) err = nilErr @@ -353,7 +351,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // Loop will be requeued only if Standby Shard Statefulset is not ready or not configured. // Till that time Reconcilation loop will remain in blocked state // if the err is return because of Shard is not ready then blocked state is rmeoved and reconcilation state is - err = r.addStandbyShards(instance, idx) + err = r.addStandbyShards(instance) if err != nil { // time.Sleep(30 * time.Second) err = nilErr @@ -363,7 +361,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // we don't need to run the requeue loop but still putting this condition to address any unkown situation // delShard function set the state to blocked and we do not allow any other operationn while delete is going on - err = r.delGsmShard(instance, idx) + err = r.delGsmShard(instance) if err != nil { // time.Sleep(30 * time.Second) err = nilErr @@ -376,13 +374,13 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req OraCatalogSpex = instance.Spec.Catalog[i] sfSet, catalogPod, err := r.validateInvidualCatalog(instance, OraCatalogSpex, int(i)) if err != nil { - shardingv1.LogMessages("INFO", "Catalog "+sfSet.Name+" is not in available state.", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Catalog "+sfSet.Name+" is not in available state.", nil, instance, r.Log) result = resultNq return result, err } result, err = shardingv1.UpdateProvForCatalog(instance, OraCatalogSpex, r.Client, sfSet, catalogPod, r.Log) if err != nil { - shardingv1.LogMessages("INFO", "Error Occurred during catalog update operation.", nil, 
instance, r.Log) + shardingv1.LogMessages("Error", "Error Occurred during catalog update operation.", nil, instance, r.Log) result = resultNq return result, err } @@ -394,13 +392,13 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { sfSet, shardPod, err := r.validateShard(instance, OraShardSpex, int(i)) if err != nil { - shardingv1.LogMessages("INFO", "Shard "+sfSet.Name+" is not in available state.", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Shard "+sfSet.Name+" is not in available state.", nil, instance, r.Log) result = resultNq return result, err } result, err = shardingv1.UpdateProvForShard(instance, OraShardSpex, r.Client, sfSet, shardPod, r.Log) if err != nil { - shardingv1.LogMessages("INFO", "Error Occurred during shard update operation..", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Error Occurred during shard update operation..", nil, instance, r.Log) result = resultNq return result, err } @@ -412,19 +410,19 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req OraGsmSpex = instance.Spec.Gsm[i] sfSet, gsmPod, err := r.validateInvidualGsm(instance, OraGsmSpex, int(i)) if err != nil { - shardingv1.LogMessages("INFO", "Gsm "+sfSet.Name+" is not in available state.", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Gsm "+sfSet.Name+" is not in available state.", nil, instance, r.Log) result = resultNq return result, err } result, err = shardingv1.UpdateProvForGsm(instance, OraGsmSpex, r.Client, sfSet, gsmPod, r.Log) if err != nil { - shardingv1.LogMessages("INFO", "Error Occurred during GSM update operation.", nil, instance, r.Log) + shardingv1.LogMessages("Error", "Error Occurred during GSM update operation.", nil, instance, r.Log) result = resultNq return result, err } } - stateType = string(databasev1alpha1.CrdReconcileCompeleteState) + stateType = 
string(databasev4.CrdReconcileCompeleteState) // r.setCrdLifeCycleState(instance, &result, &err, stateType) // Set error to ni to avoid reconcilation state reconcilation error as we are passing err to setCrdLifeCycleState @@ -439,7 +437,7 @@ func (r *ShardingDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Req // Check https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#Options to under MaxConcurrentReconciles func (r *ShardingDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&databasev1alpha1.ShardingDatabase{}). + For(&databasev4.ShardingDatabase{}). Owns(&appsv1.StatefulSet{}). Owns(&corev1.Service{}). Owns(&corev1.Pod{}). @@ -457,65 +455,58 @@ func (r *ShardingDatabaseReconciler) eventFilterPredicate() predicate.Predicate return true }, UpdateFunc: func(e event.UpdateEvent) bool { + instance := &databasev4.ShardingDatabase{} if old, ok := e.ObjectOld.(*corev1.Secret); ok { if new, ok := e.ObjectNew.(*corev1.Secret); ok { - for i := 0; i < len(r.osh); i++ { - oshInst := r.osh[i] - if (new.Name == oshInst.Instance.Spec.DbSecret.Name) && (new.Name == old.Name) { - _, ok := old.Data[oshInst.Instance.Spec.DbSecret.PwdFileName] - if ok { - if !reflect.DeepEqual(old.Data[oshInst.Instance.Spec.DbSecret.PwdFileName], new.Data[oshInst.Instance.Spec.DbSecret.PwdFileName]) { - shardingv1.LogMessages("INFO", "Secret Changed", nil, oshInst.Instance, r.Log) - } + oshInst := instance + if (new.Name == oshInst.Spec.DbSecret.Name) && (new.Name == old.Name) { + _, ok := old.Data[oshInst.Spec.DbSecret.PwdFileName] + if ok { + if !reflect.DeepEqual(old.Data[oshInst.Spec.DbSecret.PwdFileName], new.Data[oshInst.Spec.DbSecret.PwdFileName]) { + shardingv1.LogMessages("INFO", "Secret Changed", nil, oshInst, r.Log) } - shardingv1.LogMessages("INFO", "Secret update block", nil, oshInst.Instance, r.Log) } + shardingv1.LogMessages("INFO", "Secret update block", nil, oshInst, r.Log) } } } return true }, 
DeleteFunc: func(e event.DeleteEvent) bool { + instance := &databasev4.ShardingDatabase{} _, podOk := e.Object.GetLabels()["statefulset.kubernetes.io/pod-name"] - for i := 0; i < len(r.osh); i++ { - if r.osh[i] != nil { - oshInst := r.osh[i] - if oshInst.deltopology == true { - break + if oshMap[instance.Name] != nil { + oshInst := instance + if instance.DeletionTimestamp == nil { - } - if e.Object.GetLabels()[string(databasev1alpha1.ShardingDelLabelKey)] == string(databasev1alpha1.ShardingDelLabelTrueValue) { - break + if e.Object.GetLabels()[string(databasev4.ShardingDelLabelKey)] == string(databasev4.ShardingDelLabelTrueValue) { } if podOk { delObj := e.Object.(*corev1.Pod) - if e.Object.GetLabels()["type"] == "Shard" && e.Object.GetLabels()["app"] == "OracleSharding" && e.Object.GetLabels()["oralabel"] == oshInst.Instance.Name { + if e.Object.GetLabels()["type"] == "Shard" && e.Object.GetLabels()["app"] == "OracleSharding" && e.Object.GetLabels()["oralabel"] == oshInst.Name { if delObj.DeletionTimestamp != nil { - go r.gsmInvitedNodeOp(oshInst.Instance, delObj.Name) + go r.gsmInvitedNodeOp(oshInst, delObj.Name) } } - if e.Object.GetLabels()["type"] == "Catalog" && e.Object.GetLabels()["app"] == "OracleSharding" && e.Object.GetLabels()["oralabel"] == oshInst.Instance.Name { + if e.Object.GetLabels()["type"] == "Catalog" && e.Object.GetLabels()["app"] == "OracleSharding" && e.Object.GetLabels()["oralabel"] == oshInst.Name { if delObj.DeletionTimestamp != nil { - go r.gsmInvitedNodeOp(oshInst.Instance, delObj.Name) + go r.gsmInvitedNodeOp(oshInst, delObj.Name) } } - } - } } - return true }, } } // ================ Function to check secret update============= -func (r *ShardingDatabaseReconciler) UpdateSecret(instance *databasev1alpha1.ShardingDatabase, kClient client.Client, logger logr.Logger) (ctrl.Result, error) { +func (r *ShardingDatabaseReconciler) UpdateSecret(instance *databasev4.ShardingDatabase, kClient client.Client, logger logr.Logger) (ctrl.Result, 
error) { sc := &corev1.Secret{} //var err error @@ -523,7 +514,7 @@ func (r *ShardingDatabaseReconciler) UpdateSecret(instance *databasev1alpha1.Sha // Reading a Secret var err error = kClient.Get(context.TODO(), types.NamespacedName{ Name: instance.Spec.DbSecret.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, sc) if err != nil { @@ -534,43 +525,61 @@ func (r *ShardingDatabaseReconciler) UpdateSecret(instance *databasev1alpha1.Sha } // ================== Function to get the Notification controller ============== -func (r *ShardingDatabaseReconciler) getOnsConfigProvider(instance *databasev1alpha1.ShardingDatabase, idx int, -) { +func (r *ShardingDatabaseReconciler) getOnsConfigProvider(instance *databasev4.ShardingDatabase) { var err error - if instance.Spec.DbSecret.NsConfigMap != "" && instance.Spec.DbSecret.NsSecret != "" && r.osh[idx].onsProviderFlag != true { + if instance.Spec.DbSecret.NsConfigMap != "" && instance.Spec.DbSecret.NsSecret != "" && oshMap[instance.Name].OnsProviderFlag != true { cmName := instance.Spec.DbSecret.NsConfigMap secName := instance.Spec.DbSecret.NsSecret shardingv1.LogMessages("DEBUG", "Received parameters are "+shardingv1.GetFmtStr(cmName)+","+shardingv1.GetFmtStr(secName), nil, instance, r.Log) region, user, tenancy, passphrase, fingerprint, topicid := shardingv1.ReadConfigMap(cmName, instance, r.Client, r.Log) privatekey := shardingv1.ReadSecret(secName, instance, r.Client, r.Log) - r.osh[idx].topicid = topicid - r.osh[idx].onsProvider = common.NewRawConfigurationProvider(tenancy, user, region, fingerprint, privatekey, &passphrase) - r.osh[idx].rclient, err = ons.NewNotificationDataPlaneClientWithConfigurationProvider(r.osh[idx].onsProvider) + + oshMap[instance.Name].Topicid = topicid + oshMap[instance.Name].OnsProvider = common.NewRawConfigurationProvider(tenancy, user, region, fingerprint, privatekey, &passphrase) + //VV instance.Spec.TopicId = topicid + oshMap[instance.Name].Rclient, err = 
ons.NewNotificationDataPlaneClientWithConfigurationProvider(oshMap[instance.Name].OnsProvider) if err != nil { msg := "Error occurred in getting the OCI notification service based client." - r.osh[idx].onsProviderFlag = false + oshMap[instance.Name].OnsProviderFlag = false r.Log.Error(err, msg) shardingv1.LogMessages("Error", msg, nil, instance, r.Log) } else { - r.osh[idx].onsProviderFlag = true + oshMap[instance.Name].OnsProviderFlag = true } + } +} +func (r ShardingDatabaseReconciler) marshalOnsInfo(instance *databasev4.ShardingDatabase) (OnsStatus, error) { + onsData := OnsStatus{} + specBytes, err := instance.GetLastSuccessfulOnsInfo() + if err != nil { + shardingv1.LogMessages("Error", "error occurred while getting the data from getLastSuccessfulOnsInfo", nil, instance, r.Log) + return onsData, err + } else { + shardingv1.LogMessages("Error", "error occurred while getting the data from getLastSuccessfulOnsInfo and unmarshaling the object", nil, instance, r.Log) + err := json.Unmarshal(specBytes, &onsData) + if err != nil { + return onsData, err + } } + return onsData, nil } // ================== Function the Message ============== -func (r *ShardingDatabaseReconciler) sendMessage(instance *databasev1alpha1.ShardingDatabase, title string, body string) { - idx, instFlag := r.checkProvInstance(instance) +func (r *ShardingDatabaseReconciler) sendMessage(instance *databasev4.ShardingDatabase, title string, body string) { + instFlag := r.checkProvInstance(instance) if instFlag { - if r.osh[idx].onsProviderFlag { - shardingv1.SendNotification(title, body, instance, r.osh[idx].topicid, r.osh[idx].rclient, r.Log) + shardingv1.LogMessages("INFO", "sendMessage():instFlag true", nil, instance, r.Log) + if oshMap[instance.Name].OnsProviderFlag { + shardingv1.LogMessages("INFO", "sendMessage():OnsProviderFlag true", nil, instance, r.Log) + shardingv1.SendNotification(title, body, instance, oshMap[instance.Name].Topicid, oshMap[instance.Name].Rclient, r.Log) } } } -func (r 
*ShardingDatabaseReconciler) publishEvents(instance *databasev1alpha1.ShardingDatabase, eventMsg string, state string) { +func (r *ShardingDatabaseReconciler) publishEvents(instance *databasev4.ShardingDatabase, eventMsg string, state string) { - if state == string(databasev1alpha1.AvailableState) || state == string(databasev1alpha1.AddingShardState) || state == string(databasev1alpha1.ShardOnlineState) || state == string(databasev1alpha1.ProvisionState) || state == string(databasev1alpha1.DeletingState) || state == string(databasev1alpha1.Terminated) { + if state == string(databasev4.AvailableState) || state == string(databasev4.AddingShardState) || state == string(databasev4.ShardOnlineState) || state == string(databasev4.ProvisionState) || state == string(databasev4.DeletingState) || state == string(databasev4.Terminated) { r.Recorder.Eventf(instance, corev1.EventTypeNormal, "State Change", eventMsg) } else { r.Recorder.Eventf(instance, corev1.EventTypeWarning, "State Change", eventMsg) @@ -580,7 +589,7 @@ func (r *ShardingDatabaseReconciler) publishEvents(instance *databasev1alpha1.Sh } // ================== Function to check insytance deletion timestamp and activate the finalizer code ======== -func (r *ShardingDatabaseReconciler) finalizerShardingDatabaseInstance(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) finalizerShardingDatabaseInstance(instance *databasev4.ShardingDatabase, ) (error, bool) { isProvOShardToBeDeleted := instance.GetDeletionTimestamp() != nil @@ -619,8 +628,8 @@ func (r *ShardingDatabaseReconciler) finalizerShardingDatabaseInstance(instance } // ========================== FInalizer Section =================== -func (r *ShardingDatabaseReconciler) addFinalizer(instance *databasev1alpha1.ShardingDatabase) error { - reqLogger := r.Log.WithValues("instance.Spec.Namespace", instance.Spec.Namespace, "instance.Name", instance.Name) +func (r *ShardingDatabaseReconciler) addFinalizer(instance 
*databasev4.ShardingDatabase) error { + reqLogger := r.Log.WithValues("instance.Namespace", instance.Namespace, "instance.Name", instance.Name) controllerutil.AddFinalizer(instance, shardingv1.ShardingDatabaseFinalizer) // Update CR @@ -632,7 +641,7 @@ func (r *ShardingDatabaseReconciler) addFinalizer(instance *databasev1alpha1.Sha return nil } -func (r *ShardingDatabaseReconciler) finalizeShardingDatabase(instance *databasev1alpha1.ShardingDatabase) error { +func (r *ShardingDatabaseReconciler) finalizeShardingDatabase(instance *databasev4.ShardingDatabase) error { // TODO(user): Add the cleanup steps that the operator needs to do before the CR // can be deleted. Examples of finalizers include performing backups and deleting // resources that are not owned by this CR, like a PVC. @@ -641,10 +650,9 @@ func (r *ShardingDatabaseReconciler) finalizeShardingDatabase(instance *database var err error var pvcName string - idx, _ := r.checkProvInstance(instance) + r.checkProvInstance(instance) sfSetFound := &appsv1.StatefulSet{} svcFound := &corev1.Service{} - r.osh[idx].deltopology = true if len(instance.Spec.Shard) > 0 { for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex := instance.Spec.Shard[i] @@ -831,41 +839,29 @@ func (r *ShardingDatabaseReconciler) finalizeShardingDatabase(instance *database } } - r.osh[idx].deltopology = false - //r.osh[idx].addSem.Release(1) - //r.osh[idx].delSem.Release(1) - //instance1 := &shardingv1alpha1.ProvShard{} - r.osh[idx].Instance = &databasev1alpha1.ShardingDatabase{} - - //r.osh[idx] = nil + oshMap[instance.Name].Instance = &databasev4.ShardingDatabase{} return nil } -//============== - // Get the current instance -func (r *ShardingDatabaseReconciler) checkProvInstance(instance *databasev1alpha1.ShardingDatabase, -) (int, bool) { +func (r *ShardingDatabaseReconciler) checkProvInstance(instance *databasev4.ShardingDatabase, +) bool { var status bool = false - var idx int - for i := 0; i < len(r.osh); i++ { - idx = i - 
if r.osh[i] != nil { - if !r.osh[i].deltopology { - if r.osh[i].Instance.Name == instance.Name { - status = true - break - } - } + if oshMap[instance.Name] != nil { + title := "checkProvInstance()" + message := "oshMap.Instance.Name=[" + oshMap[instance.Name].Instance.Name + "]. instance.Name=[" + instance.Name + "]." + shardingv1.LogMessages("INFO", title+":"+message, nil, instance, r.Log) + if oshMap[instance.Name].Instance.Name == instance.Name { + status = true } } - return idx, status + return status } // =========== validate Specs ============ -func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.ShardingDatabase, idx int) error { +func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev4.ShardingDatabase) error { var eventMsg string var eventErr string = "Spec Error" @@ -880,7 +876,7 @@ func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.Sha if lastSuccSpec == nil { // Logic to check if inital Spec is good or not - err = r.checkShardingType(instance, idx) + err = r.checkShardingType(instance) if err != nil { return err } @@ -909,11 +905,6 @@ func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.Sha } else { // if the last sucessful spec is not nil // check the parameters which cannot be changed - if lastSuccSpec.Namespace != instance.Spec.Namespace { - eventMsg = "ShardingDatabase CRD resource " + shardingv1.GetFmtStr(instance.Name) + " namespace changed from " + shardingv1.GetFmtStr(lastSuccSpec.Namespace) + " to " + shardingv1.GetFmtStr(instance.Spec.Namespace) + ". This change is not allowed." 
- r.Recorder.Eventf(instance, corev1.EventTypeWarning, eventErr, eventMsg) - return fmt.Errorf("instance spec has changed and namespace change is not supported") - } if lastSuccSpec.DbImage != instance.Spec.DbImage { eventMsg = "ShardingDatabase CRD resource " + shardingv1.GetFmtStr(instance.Name) + " DBImage changed from " + shardingv1.GetFmtStr(lastSuccSpec.DbImage) + " to " + shardingv1.GetFmtStr(instance.Spec.DbImage) + ". This change is not allowed." @@ -950,7 +941,7 @@ func (r *ShardingDatabaseReconciler) validateSpex(instance *databasev1alpha1.Sha return nil } -func (r *ShardingDatabaseReconciler) checkShardingType(instance *databasev1alpha1.ShardingDatabase, idx int) error { +func (r *ShardingDatabaseReconciler) checkShardingType(instance *databasev4.ShardingDatabase) error { var i, k int32 var regionFlag bool @@ -973,7 +964,7 @@ func (r *ShardingDatabaseReconciler) checkShardingType(instance *databasev1alpha // Check the ShardGroups/ Shard Space and Shard group Name // checkShrdGSR is Shardgroup/ShardSpace/ShardRegion -func (r *ShardingDatabaseReconciler) checkShardSpace(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) error { +func (r *ShardingDatabaseReconciler) checkShardSpace(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) error { if instance.Spec.ShardingType != "" { // Check for the Sharding Type and if it is USER do following @@ -992,7 +983,7 @@ func (r *ShardingDatabaseReconciler) checkShardSpace(instance *databasev1alpha1. 
// Check the ShardGroups/ Shard Space and Shard group Name // checkShrdGSR is Shardgroup/ShardSpace/ShardRegion -func (r *ShardingDatabaseReconciler) checkShardGroup(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec) error { +func (r *ShardingDatabaseReconciler) checkShardGroup(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec) error { // We need to check Shard Region and Shard Group for ShardingType='SYSTEM' and 'NATIVE' if strings.TrimSpace(strings.ToUpper(instance.Spec.ShardingType)) != "USER" { @@ -1011,7 +1002,7 @@ func (r *ShardingDatabaseReconciler) checkShardGroup(instance *databasev1alpha1. // Compare GSM Env Variables -func (r *ShardingDatabaseReconciler) comapreGsmEnvVariables(instance *databasev1alpha1.ShardingDatabase, lastSuccSpec *databasev1alpha1.ShardingDatabaseSpec) bool { +func (r *ShardingDatabaseReconciler) comapreGsmEnvVariables(instance *databasev4.ShardingDatabase, lastSuccSpec *databasev4.ShardingDatabaseSpec) bool { var eventMsg string var eventErr string = "Spec Error" var i, j int32 @@ -1036,7 +1027,7 @@ func (r *ShardingDatabaseReconciler) comapreGsmEnvVariables(instance *databasev1 return true } -func (r *ShardingDatabaseReconciler) comapreCatalogEnvVariables(instance *databasev1alpha1.ShardingDatabase, lastSuccSpec *databasev1alpha1.ShardingDatabaseSpec) bool { +func (r *ShardingDatabaseReconciler) comapreCatalogEnvVariables(instance *databasev4.ShardingDatabase, lastSuccSpec *databasev4.ShardingDatabaseSpec) bool { var eventMsg string var eventErr string = "Spec Error" var i, j int32 @@ -1061,7 +1052,7 @@ func (r *ShardingDatabaseReconciler) comapreCatalogEnvVariables(instance *databa return true } -func (r *ShardingDatabaseReconciler) comapreShardEnvVariables(instance *databasev1alpha1.ShardingDatabase, lastSuccSpec *databasev1alpha1.ShardingDatabaseSpec) bool { +func (r *ShardingDatabaseReconciler) comapreShardEnvVariables(instance *databasev4.ShardingDatabase, lastSuccSpec 
*databasev4.ShardingDatabaseSpec) bool { var eventMsg string var eventErr string = "Spec Error" var i, j int32 @@ -1088,21 +1079,21 @@ func (r *ShardingDatabaseReconciler) comapreShardEnvVariables(instance *database //===== Set the CRD resource life cycle state ======== -func (r *ShardingDatabaseReconciler) setCrdLifeCycleState(instance *databasev1alpha1.ShardingDatabase, result *ctrl.Result, err *error, stateType *string) { +func (r *ShardingDatabaseReconciler) setCrdLifeCycleState(instance *databasev4.ShardingDatabase, result *ctrl.Result, err *error, stateType *string) { var metaCondition metav1.Condition var updateFlag = false if *stateType == "ReconcileWaiting" { - metaCondition = shardingv1.GetMetaCondition(instance, result, err, *stateType, string(databasev1alpha1.CrdReconcileWaitingReason)) + metaCondition = shardingv1.GetMetaCondition(instance, result, err, *stateType, string(databasev4.CrdReconcileWaitingReason)) updateFlag = true } else if *stateType == "ReconcileComplete" { - metaCondition = shardingv1.GetMetaCondition(instance, result, err, *stateType, string(databasev1alpha1.CrdReconcileCompleteReason)) + metaCondition = shardingv1.GetMetaCondition(instance, result, err, *stateType, string(databasev4.CrdReconcileCompleteReason)) updateFlag = true } else if result.Requeue { - metaCondition = shardingv1.GetMetaCondition(instance, result, err, string(databasev1alpha1.CrdReconcileQueuedState), string(databasev1alpha1.CrdReconcileQueuedReason)) + metaCondition = shardingv1.GetMetaCondition(instance, result, err, string(databasev4.CrdReconcileQueuedState), string(databasev4.CrdReconcileQueuedReason)) updateFlag = true } else if *err != nil { - metaCondition = shardingv1.GetMetaCondition(instance, result, err, string(databasev1alpha1.CrdReconcileErrorState), string(databasev1alpha1.CrdReconcileErrorReason)) + metaCondition = shardingv1.GetMetaCondition(instance, result, err, string(databasev4.CrdReconcileErrorState), 
string(databasev4.CrdReconcileErrorReason)) updateFlag = true } else { @@ -1119,7 +1110,7 @@ func (r *ShardingDatabaseReconciler) setCrdLifeCycleState(instance *databasev1al } -func (r *ShardingDatabaseReconciler) validateGsmnCatalog(instance *databasev1alpha1.ShardingDatabase) error { +func (r *ShardingDatabaseReconciler) validateGsmnCatalog(instance *databasev4.ShardingDatabase) error { var err error _, _, err = r.validateCatalog(instance) if err != nil { @@ -1132,7 +1123,7 @@ func (r *ShardingDatabaseReconciler) validateGsmnCatalog(instance *databasev1alp return nil } -func (r *ShardingDatabaseReconciler) validateGsm(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) validateGsm(instance *databasev4.ShardingDatabase, ) (*appsv1.StatefulSet, *corev1.Pod, error) { //var err error var i int32 @@ -1160,7 +1151,7 @@ func (r *ShardingDatabaseReconciler) validateGsm(instance *databasev1alpha1.Shar return gsmSfSet, gsmPod, fmt.Errorf("GSM is not ready") } -func (r *ShardingDatabaseReconciler) validateInvidualGsm(instance *databasev1alpha1.ShardingDatabase, OraGsmSpex databasev1alpha1.GsmSpec, specId int, +func (r *ShardingDatabaseReconciler) validateInvidualGsm(instance *databasev4.ShardingDatabase, OraGsmSpex databasev4.GsmSpec, specId int, ) (*appsv1.StatefulSet, *corev1.Pod, error) { //var err error var i int32 @@ -1172,42 +1163,44 @@ func (r *ShardingDatabaseReconciler) validateInvidualGsm(instance *databasev1alp podList := &corev1.PodList{} var isPodExist bool + // VV : uninitialised variable 'i' being used. + i = int32(specId) gsmSfSet, err = shardingv1.CheckSfset(OraGsmSpex.Name, instance, r.Client) if err != nil { msg = "Unable to find GSM statefulset " + shardingv1.GetFmtStr(OraGsmSpex.Name) + "." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateGsmStatus(instance, int(i), string(databasev1alpha1.StatefulSetNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateGsmStatus(instance, int(i), string(databasev4.StatefulSetNotFound)) return gsmSfSet, gsmPod, err } podList, err = shardingv1.GetPodList(gsmSfSet.Name, "GSM", instance, r.Client) if err != nil { msg = "Unable to find any pod in statefulset " + shardingv1.GetFmtStr(gsmSfSet.Name) + "." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateGsmStatus(instance, int(i), string(databasev1alpha1.PodNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateGsmStatus(instance, int(i), string(databasev4.PodNotFound)) return gsmSfSet, gsmPod, err } isPodExist, gsmPod = shardingv1.PodListValidation(podList, gsmSfSet.Name, instance, r.Client) if !isPodExist { msg = "Unable to validate GSM " + shardingv1.GetFmtStr(gsmPod.Name) + " pod. GSM pod doesn't seems to be ready to accept the commands." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateGsmStatus(instance, int(i), string(databasev1alpha1.PodNotReadyState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateGsmStatus(instance, int(i), string(databasev4.PodNotReadyState)) return gsmSfSet, gsmPod, fmt.Errorf("pod doesn't exist") } err = shardingv1.CheckGsmStatus(gsmPod.Name, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { msg = "Unable to validate GSM director. GSM director doesn't seems to be ready to accept the commands." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateGsmStatus(instance, int(i), string(databasev1alpha1.ProvisionState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateGsmStatus(instance, int(i), string(databasev4.ProvisionState)) return gsmSfSet, gsmPod, err } - r.updateGsmStatus(instance, specId, string(databasev1alpha1.AvailableState)) + r.updateGsmStatus(instance, specId, string(databasev4.AvailableState)) return gsmSfSet, gsmPod, nil } -func (r *ShardingDatabaseReconciler) validateCatalog(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) validateCatalog(instance *databasev4.ShardingDatabase, ) (*appsv1.StatefulSet, *corev1.Pod, error) { catalogSfSet := &appsv1.StatefulSet{} @@ -1236,7 +1229,7 @@ func (r *ShardingDatabaseReconciler) validateCatalog(instance *databasev1alpha1. } // === Validate Individual Catalog -func (r *ShardingDatabaseReconciler) validateInvidualCatalog(instance *databasev1alpha1.ShardingDatabase, OraCatalogSpex databasev1alpha1.CatalogSpec, specId int, +func (r *ShardingDatabaseReconciler) validateInvidualCatalog(instance *databasev4.ShardingDatabase, OraCatalogSpex databasev4.CatalogSpec, specId int, ) (*appsv1.StatefulSet, *corev1.Pod, error) { var err error @@ -1248,40 +1241,40 @@ func (r *ShardingDatabaseReconciler) validateInvidualCatalog(instance *databasev catalogSfSet, err = shardingv1.CheckSfset(OraCatalogSpex.Name, instance, r.Client) if err != nil { msg := "Unable to find Catalog statefulset " + shardingv1.GetFmtStr(OraCatalogSpex.Name) + "." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.StatefulSetNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateCatalogStatus(instance, specId, string(databasev4.StatefulSetNotFound)) return catalogSfSet, catalogPod, err } podList, err = shardingv1.GetPodList(catalogSfSet.Name, "CATALOG", instance, r.Client) if err != nil { msg := "Unable to find any pod in statefulset " + shardingv1.GetFmtStr(catalogSfSet.Name) + "." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.PodNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateCatalogStatus(instance, specId, string(databasev4.PodNotFound)) return catalogSfSet, catalogPod, err } isPodExist, catalogPod = shardingv1.PodListValidation(podList, catalogSfSet.Name, instance, r.Client) if !isPodExist { msg := "Unable to validate Catalog " + shardingv1.GetFmtStr(catalogSfSet.Name) + " pod. Catalog pod doesn't seems to be ready to accept the commands." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.PodNotReadyState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateCatalogStatus(instance, specId, string(databasev4.PodNotReadyState)) return catalogSfSet, catalogPod, fmt.Errorf("Pod doesn't exist") } err = shardingv1.ValidateDbSetup(catalogPod.Name, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { msg := "Unable to validate Catalog. Catalog doesn't seems to be ready to accept the commands." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.ProvisionState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateCatalogStatus(instance, specId, string(databasev4.ProvisionState)) return catalogSfSet, catalogPod, err } - r.updateCatalogStatus(instance, specId, string(databasev1alpha1.AvailableState)) + r.updateCatalogStatus(instance, specId, string(databasev4.AvailableState)) return catalogSfSet, catalogPod, nil } // ======= Function to validate Shard -func (r *ShardingDatabaseReconciler) validateShard(instance *databasev1alpha1.ShardingDatabase, OraShardSpex databasev1alpha1.ShardSpec, specId int, +func (r *ShardingDatabaseReconciler) validateShard(instance *databasev4.ShardingDatabase, OraShardSpex databasev4.ShardSpec, specId int, ) (*appsv1.StatefulSet, *corev1.Pod, error) { var err error @@ -1291,39 +1284,39 @@ func (r *ShardingDatabaseReconciler) validateShard(instance *databasev1alpha1.Sh shardSfSet, err = shardingv1.CheckSfset(OraShardSpex.Name, instance, r.Client) if err != nil { msg := "Unable to find Shard statefulset " + shardingv1.GetFmtStr(OraShardSpex.Name) + "." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateShardStatus(instance, specId, string(databasev1alpha1.StatefulSetNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateShardStatus(instance, specId, string(databasev4.StatefulSetNotFound)) return shardSfSet, shardPod, err } podList, err := shardingv1.GetPodList(shardSfSet.Name, "SHARD", instance, r.Client) if err != nil { msg := "Unable to find any pod in statefulset " + shardingv1.GetFmtStr(shardSfSet.Name) + "." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateShardStatus(instance, specId, string(databasev1alpha1.PodNotFound)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateShardStatus(instance, specId, string(databasev4.PodNotFound)) return shardSfSet, shardPod, err } isPodExist, shardPod := shardingv1.PodListValidation(podList, shardSfSet.Name, instance, r.Client) if !isPodExist { msg := "Unable to validate Shard " + shardingv1.GetFmtStr(shardPod.Name) + " pod. Shard pod doesn't seems to be ready to accept the commands." - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateShardStatus(instance, specId, string(databasev1alpha1.PodNotReadyState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateShardStatus(instance, specId, string(databasev4.PodNotReadyState)) return shardSfSet, shardPod, err } err = shardingv1.ValidateDbSetup(shardPod.Name, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { msg := "Unable to validate shard. Shard doesn't seems to be ready to accept the commands." 
- shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) - r.updateShardStatus(instance, specId, string(databasev1alpha1.ProvisionState)) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) + r.updateShardStatus(instance, specId, string(databasev4.ProvisionState)) return shardSfSet, shardPod, err } - r.updateShardStatus(instance, specId, string(databasev1alpha1.AvailableState)) + r.updateShardStatus(instance, specId, string(databasev4.AvailableState)) return shardSfSet, shardPod, nil } // This function updates the shard topology over all -func (r *ShardingDatabaseReconciler) updateShardTopologyStatus(instance *databasev1alpha1.ShardingDatabase) { +func (r *ShardingDatabaseReconciler) updateShardTopologyStatus(instance *databasev4.ShardingDatabase) { //shardPod := &corev1.Pod{} //gsmSfSet := &appsv1.StatefulSet{} gsmPod := &corev1.Pod{} @@ -1340,7 +1333,7 @@ func (r *ShardingDatabaseReconciler) updateShardTopologyStatus(instance *databas } -func (r *ShardingDatabaseReconciler) updateShardTopologyShardsInGsm(instance *databasev1alpha1.ShardingDatabase, gsmPod *corev1.Pod) { +func (r *ShardingDatabaseReconciler) updateShardTopologyShardsInGsm(instance *databasev4.ShardingDatabase, gsmPod *corev1.Pod) { shardSfSet := &appsv1.StatefulSet{} //shardPod := &corev1.Pod{} //gsmSfSet := &appsv1.StatefulSet{} @@ -1366,7 +1359,7 @@ func (r *ShardingDatabaseReconciler) updateShardTopologyShardsInGsm(instance *da } } -func (r *ShardingDatabaseReconciler) updateGsmStatus(instance *databasev1alpha1.ShardingDatabase, specIdx int, state string) { +func (r *ShardingDatabaseReconciler) updateGsmStatus(instance *databasev4.ShardingDatabase, specIdx int, state string) { var currState string var eventMsg string @@ -1396,7 +1389,7 @@ func (r *ShardingDatabaseReconciler) updateGsmStatus(instance *databasev1alpha1. 
} } -func (r *ShardingDatabaseReconciler) updateCatalogStatus(instance *databasev1alpha1.ShardingDatabase, specIdx int, state string) { +func (r *ShardingDatabaseReconciler) updateCatalogStatus(instance *databasev4.ShardingDatabase, specIdx int, state string) { var eventMsg string var currState string var eventMsgFlag = true @@ -1404,7 +1397,7 @@ func (r *ShardingDatabaseReconciler) updateCatalogStatus(instance *databasev1alp name := instance.Spec.Catalog[specIdx].Name if len(instance.Status.Catalog) > 0 { - currState = shardingv1.GetGsmCatalogStatusKey(instance, name+"_"+string(databasev1alpha1.State)) + currState = shardingv1.GetGsmCatalogStatusKey(instance, name+"_"+string(databasev4.State)) if currState == state { eventMsgFlag = false } @@ -1423,14 +1416,14 @@ func (r *ShardingDatabaseReconciler) updateCatalogStatus(instance *databasev1alp } } -func (r *ShardingDatabaseReconciler) updateShardStatus(instance *databasev1alpha1.ShardingDatabase, specIdx int, state string) { +func (r *ShardingDatabaseReconciler) updateShardStatus(instance *databasev4.ShardingDatabase, specIdx int, state string) { var eventMsg string var currState string var eventMsgFlag = true name := instance.Spec.Shard[specIdx].Name if len(instance.Status.Shard) > 0 { - currState = shardingv1.GetGsmShardStatusKey(instance, name+"_"+string(databasev1alpha1.State)) + currState = shardingv1.GetGsmShardStatusKey(instance, name+"_"+string(databasev4.State)) if currState == state { eventMsgFlag = false } @@ -1449,7 +1442,7 @@ func (r *ShardingDatabaseReconciler) updateShardStatus(instance *databasev1alpha } } -func (r *ShardingDatabaseReconciler) updateGsmShardStatus(instance *databasev1alpha1.ShardingDatabase, name string, state string) { +func (r *ShardingDatabaseReconciler) updateGsmShardStatus(instance *databasev4.ShardingDatabase, name string, state string) { var eventMsg string var currState string var eventMsgFlag = true @@ -1480,7 +1473,7 @@ func (r *ShardingDatabaseReconciler) 
updateGsmShardStatus(instance *databasev1al } // This function add the Primary Shards in GSM -func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1.ShardingDatabase, idx int) error { +func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev4.ShardingDatabase) error { //var result ctrl.Result var result ctrl.Result var i int32 @@ -1509,7 +1502,7 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 if !shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { if setLifeCycleFlag != true { setLifeCycleFlag = true - stateType := string(databasev1alpha1.CrdReconcileWaitingState) + stateType := string(databasev4.CrdReconcileWaitingState) r.setCrdLifeCycleState(instance, &result, &err, &stateType) } // 1st Step is to check if Shard is in good state if not then just continue @@ -1551,14 +1544,14 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 last := fileName[strings.LastIndex(fileName, "/")+1:] fileName1 := last fsLoc := shardingv1.TmpLoc + "/" + fileName1 - _, _, _, err = shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fmt.Sprintf("%s/%s:/%s", instance.Spec.Namespace, gsmPod.Name, fileName), fsLoc, "") + _, _, _, err = shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fmt.Sprintf("%s/%s:/%s", instance.Namespace, gsmPod.Name, fileName), fsLoc, "") if err != nil { fmt.Printf("failed to copy file") //return err } // Copying it to Shard Pod - _, _, _, err = shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fsLoc, fmt.Sprintf("%s/%s:/%s", instance.Spec.Namespace, OraShardSpex.Name+"-0", fsLoc), "") + _, _, _, err = shardingv1.KctlCopyFile(r.kubeClient, r.kubeConfig, instance, configrest, kclientset, r.Log, fsLoc, fmt.Sprintf("%s/%s:/%s", instance.Namespace, OraShardSpex.Name+"-0", fsLoc), "") if err != nil { 
fmt.Printf("failed to copy file") //return err @@ -1569,18 +1562,19 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 // If the shard doesn't exist in GSM then just add the shard statefulset and update GSM shard status // ADD Shard in GSM - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.AddingShardState)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.AddingShardState)) err = shardingv1.AddShardInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.AddingShardErrorState)) - title = "Shard Addition Failure" - message = "Error occurred during shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " addition." - shardingv1.LogMessages("INFO", title+":"+message, nil, instance, r.Log) - if sentFailMsg[OraShardSpex.Name] != true { + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.AddingShardErrorState)) + title = instance.Namespace + ":Shard Addition Failure" + message = "TopicId:" + oshMap[instance.Name].Topicid + ":Error occurred during shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " addition." 
+ shardingv1.LogMessages("Error", title+":"+message, nil, instance, r.Log) + msgKey := instance.Namespace + "-" + OraShardSpex.Name + if sentFailMsg[msgKey] != true { r.sendMessage(instance, title, message) } - sentFailMsg[OraShardSpex.Name] = true - sentCompleteMsg[OraShardSpex.Name] = false + sentFailMsg[msgKey] = true + sentCompleteMsg[msgKey] = false deployFlag = false } } @@ -1606,7 +1600,7 @@ func (r *ShardingDatabaseReconciler) addPrimaryShards(instance *databasev1alpha1 } // This function Check the online shard -func (r *ShardingDatabaseReconciler) verifyShards(instance *databasev1alpha1.ShardingDatabase, gsmPod *corev1.Pod, shardSfSet *appsv1.StatefulSet, OraShardSpex databasev1alpha1.ShardSpec) error { +func (r *ShardingDatabaseReconciler) verifyShards(instance *databasev4.ShardingDatabase, gsmPod *corev1.Pod, shardSfSet *appsv1.StatefulSet, OraShardSpex databasev4.ShardSpec) error { //var result ctrl.Result //var i int32 var err error @@ -1619,37 +1613,38 @@ func (r *ShardingDatabaseReconciler) verifyShards(instance *databasev1alpha1.Sha if err != nil { // If the shard doesn't exist in GSM then just delete the shard statefulset and update GSM shard status /// Terminate state means we will remove teh shard entry from GSM shard status - r.updateGsmShardStatus(instance, shardSfSet.Name, string(databasev1alpha1.ShardOnlineErrorState)) + r.updateGsmShardStatus(instance, shardSfSet.Name, string(databasev4.ShardOnlineErrorState)) if strings.ToUpper(instance.Spec.ReplicationType) != "NATIVE" { shardingv1.CancelChunksInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) } return err } oldStateStr := shardingv1.GetGsmShardStatus(instance, shardSfSet.Name) - r.updateGsmShardStatus(instance, shardSfSet.Name, string(databasev1alpha1.ShardOnlineState)) + r.updateGsmShardStatus(instance, shardSfSet.Name, string(databasev4.ShardOnlineState)) // Following logic will sent a email only once - if oldStateStr != string(databasev1alpha1.ShardOnlineState) { 
- title = "Shard Addition Completed" - message = "Shard addition completed for shard " + shardingv1.GetFmtStr(shardSfSet.Name) + " in GSM." + if oldStateStr != string(databasev4.ShardOnlineState) { + title = instance.Namespace + ":Shard Addition Completed" + message = "TopicId:" + oshMap[instance.Name].Topicid + ":Shard addition completed for shard " + shardingv1.GetFmtStr(shardSfSet.Name) + " in GSM." shardingv1.LogMessages("INFO", title+":"+message, nil, instance, r.Log) - if sentCompleteMsg[shardSfSet.Name] != true { + msgKey := instance.Namespace + "-" + shardSfSet.Name + if sentCompleteMsg[msgKey] != true { r.sendMessage(instance, title, message) } - sentCompleteMsg[shardSfSet.Name] = true - sentFailMsg[shardSfSet.Name] = false + sentCompleteMsg[msgKey] = true + sentFailMsg[msgKey] = false } return nil } -func (r *ShardingDatabaseReconciler) addStandbyShards(instance *databasev1alpha1.ShardingDatabase, idx int) error { +func (r *ShardingDatabaseReconciler) addStandbyShards(instance *databasev4.ShardingDatabase) error { //var result ctrl.Result return nil } // ========== Delete Shard Section==================== -func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.ShardingDatabase, idx int) error { +func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev4.ShardingDatabase) error { var result ctrl.Result var i int32 var err error @@ -1671,7 +1666,7 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar if shardingv1.CheckIsDeleteFlag(OraShardSpex.IsDelete, instance, r.Log) { if setLifeCycleFlag != true { setLifeCycleFlag = true - stateType := string(databasev1alpha1.CrdReconcileWaitingState) + stateType := string(databasev4.CrdReconcileWaitingState) r.setCrdLifeCycleState(instance, &result, &err, &stateType) } // Step 1st to check if GSM is in good state if not then just return because you can't do anything @@ -1699,18 +1694,18 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance 
*databasev1alpha1.Shar // If the shard doesn't exist in GSM then just delete the shard statefulset and update GSM shard status /// Terminate state means we will remove teh shard entry from GSM shard status r.delShard(instance, shardSfSet.Name, shardSfSet, shardPod, int(i)) - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.Terminated)) - r.updateShardStatus(instance, int(i), string(databasev1alpha1.Terminated)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.Terminated)) + r.updateShardStatus(instance, int(i), string(databasev4.Terminated)) continue } // 4th step to check if shard is in GSM and shard is online if not then continue // CHeck before deletion if GSM is not ready set the Shard State to Delete Error - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.DeletingState)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.DeletingState)) err = shardingv1.CheckOnlineShardInGsm(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { // If the shard doesn't exist in GSM then just delete the shard statefulset and update GSM shard status /// Terminate state means we will remove teh shard entry from GSM shard status - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.DeleteErrorState)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.DeleteErrorState)) continue } // 5th Step @@ -1720,7 +1715,7 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar if len(instance.Spec.ReplicationType) == 0 { err = shardingv1.MoveChunks(gsmPod.Name, sparams, instance, r.kubeClient, r.kubeConfig, r.Log) if err != nil { - r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.ChunkMoveError)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.ChunkMoveError)) title = "Chunk Movement Failure" message = "Error occurred during chunk movement in 
shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " deletion." r.sendMessage(instance, title, message) @@ -1728,7 +1723,7 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar err = shardingv1.InstanceShardPatch(instance, instance, r.Client, i, "isDelete", "failed") if err != nil { msg = "Error occurred while changing the isDelete value to failed in Spec struct" - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) return err } continue @@ -1752,9 +1747,9 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar instance.Spec.Shard[i].IsDelete = "failed" err = shardingv1.InstanceShardPatch(instance, instance, r.Client, i, "isDelete", "failed") if err != nil { - // r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.ChunkMoveError)) + // r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.ChunkMoveError)) msg = "Error occurred while changing the isDelete value to failed in Spec struct" - shardingv1.LogMessages("INFO", msg, nil, instance, r.Log) + shardingv1.LogMessages("Error", msg, nil, instance, r.Log) // return err } return err @@ -1771,7 +1766,7 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar if err != nil { msg = "Error occurred during shard" + shardingv1.GetFmtStr(OraShardSpex.Name) + "removal from Gsm" shardingv1.LogMessages("Error", msg, nil, instance, r.Log) - r.updateShardStatus(instance, int(i), string(databasev1alpha1.ShardRemoveError)) + r.updateShardStatus(instance, int(i), string(databasev4.ShardRemoveError)) instance.Spec.Shard[i].IsDelete = "failed" continue } @@ -1779,8 +1774,8 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar // 8th Step // Delete the Statefulset as all the chunks has moved and Shard can be phyiscally deleted r.delShard(instance, shardSfSet.Name, shardSfSet, shardPod, int(i)) - 
r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev1alpha1.Terminated)) - r.updateShardStatus(instance, int(i), string(databasev1alpha1.Terminated)) + r.updateGsmShardStatus(instance, OraShardSpex.Name, string(databasev4.Terminated)) + r.updateShardStatus(instance, int(i), string(databasev4.Terminated)) title = "Shard Deletion Completed" message = "Shard deletion completed for shard " + shardingv1.GetFmtStr(OraShardSpex.Name) + " in GSM." r.sendMessage(instance, title, message) @@ -1792,7 +1787,7 @@ func (r *ShardingDatabaseReconciler) delGsmShard(instance *databasev1alpha1.Shar } // This function delete the physical shard -func (r *ShardingDatabaseReconciler) delShard(instance *databasev1alpha1.ShardingDatabase, sfSetName string, sfSetFound *appsv1.StatefulSet, sfsetPod *corev1.Pod, specIdx int) { +func (r *ShardingDatabaseReconciler) delShard(instance *databasev4.ShardingDatabase, sfSetName string, sfSetFound *appsv1.StatefulSet, sfsetPod *corev1.Pod, specIdx int) { //var status bool var err error @@ -1803,7 +1798,7 @@ func (r *ShardingDatabaseReconciler) delShard(instance *databasev1alpha1.Shardin if err != nil { msg := "Failed to patch the Shard StatefulSet: " + sfSetFound.Name shardingv1.LogMessages("DEBUG", msg, err, instance, r.Log) - r.updateShardStatus(instance, specIdx, string(databasev1alpha1.LabelPatchingError)) + r.updateShardStatus(instance, specIdx, string(databasev4.LabelPatchingError)) return } @@ -1811,7 +1806,7 @@ func (r *ShardingDatabaseReconciler) delShard(instance *databasev1alpha1.Shardin if err != nil { msg = "Failed to delete Shard StatefulSet: " + shardingv1.GetFmtStr(sfSetFound.Name) shardingv1.LogMessages("DEBUG", msg, err, instance, r.Log) - r.updateShardStatus(instance, specIdx, string(databasev1alpha1.DeleteErrorState)) + r.updateShardStatus(instance, specIdx, string(databasev4.DeleteErrorState)) return } /// Delete External Service @@ -1842,14 +1837,14 @@ func (r *ShardingDatabaseReconciler) delShard(instance 
*databasev1alpha1.Shardin if err != nil { msg = "Failed to delete Shard pvc claim " + shardingv1.GetFmtStr(pvcName) shardingv1.LogMessages("DEBUG", msg, err, instance, r.Log) - r.updateShardStatus(instance, specIdx, string(databasev1alpha1.DeletePVCError)) + r.updateShardStatus(instance, specIdx, string(databasev4.DeletePVCError)) } } } // ======== GSM Invited Node ========== // Remove and add GSM invited node -func (r *ShardingDatabaseReconciler) gsmInvitedNodeOp(instance *databasev1alpha1.ShardingDatabase, objName string, +func (r *ShardingDatabaseReconciler) gsmInvitedNodeOp(instance *databasev4.ShardingDatabase, objName string, ) { var msg string @@ -1892,10 +1887,10 @@ func (r *ShardingDatabaseReconciler) gsmInvitedNodeOp(instance *databasev1alpha1 // ================================== CREATE FUNCTIONS ============================= // This function create a service based isExtern parameter set in the yaml file -func (r *ShardingDatabaseReconciler) createService(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) createService(instance *databasev4.ShardingDatabase, dep *corev1.Service, ) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Instance.Namespace", instance.Spec.Namespace, "Instance.Name", instance.Name) + reqLogger := r.Log.WithValues("Instance.Namespace", instance.Namespace, "Instance.Name", instance.Name) // See if Service already exists and create if it doesn't // We are getting error on nil pointer segment when r.scheme is null // Error : invalid memory address or nil pointer dereference" (runtime error: invalid memory address or nil pointer dereference) @@ -1912,7 +1907,7 @@ func (r *ShardingDatabaseReconciler) createService(instance *databasev1alpha1.Sh err := r.Client.Get(context.TODO(), types.NamespacedName{ Name: dep.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, found) jsn, _ := json.Marshal(dep) @@ -1939,12 +1934,12 @@ func (r *ShardingDatabaseReconciler) 
createService(instance *databasev1alpha1.Sh } // This function deploy the statefulset -func (r *ShardingDatabaseReconciler) deployStatefulSet(instance *databasev1alpha1.ShardingDatabase, +func (r *ShardingDatabaseReconciler) deployStatefulSet(instance *databasev4.ShardingDatabase, dep *appsv1.StatefulSet, resType string, ) (ctrl.Result, error) { - reqLogger := r.Log.WithValues("Instance.Namespace", instance.Spec.Namespace, "Instance.Name", instance.Name) + reqLogger := r.Log.WithValues("Instance.Namespace", instance.Namespace, "Instance.Name", instance.Name) message := "Inside the deployStatefulSet function" shardingv1.LogMessages("DEBUG", message, nil, instance, r.Log) // See if StatefulSets already exists and create if it doesn't @@ -1961,7 +1956,7 @@ func (r *ShardingDatabaseReconciler) deployStatefulSet(instance *databasev1alpha found := &appsv1.StatefulSet{} err := r.Client.Get(context.TODO(), types.NamespacedName{ Name: dep.Name, - Namespace: instance.Spec.Namespace, + Namespace: instance.Namespace, }, found) jsn, _ := json.Marshal(dep) shardingv1.LogMessages("DEBUG", string(jsn), nil, instance, r.Log) @@ -1993,11 +1988,11 @@ func (r *ShardingDatabaseReconciler) deployStatefulSet(instance *databasev1alpha return ctrl.Result{}, nil } -func (r *ShardingDatabaseReconciler) checkShardState(instance *databasev1alpha1.ShardingDatabase) error { +func (r *ShardingDatabaseReconciler) checkShardState(instance *databasev4.ShardingDatabase) error { var i int32 var err error = nil - var OraShardSpex databasev1alpha1.ShardSpec + var OraShardSpex databasev4.ShardSpec var currState string var eventMsg string var msg string @@ -2016,34 +2011,29 @@ func (r *ShardingDatabaseReconciler) checkShardState(instance *databasev1alpha1. 
for i = 0; i < int32(len(instance.Spec.Shard)); i++ { OraShardSpex = instance.Spec.Shard[i] currState = shardingv1.GetGsmShardStatus(instance, OraShardSpex.Name) - if currState == string(databasev1alpha1.AddingShardState) { - eventMsg = "Shard Addition in progress. Requeuing" + if OraShardSpex.IsDelete == "failed" { + eventMsg = "Shard Deletion failed for [" + OraShardSpex.Name + "]. Retry shard deletion after manually moving the chunks. Requeuing" err = fmt.Errorf(eventMsg) - break - } else if currState == string(databasev1alpha1.DeletingState) { - eventMsg = "Shard Deletion in progress. Requeuing" + } else if currState == string(databasev4.AddingShardState) { + eventMsg = "Shard Addition in progress for [" + OraShardSpex.Name + "]. Requeuing" err = fmt.Errorf(eventMsg) - err = nil - break - } else if OraShardSpex.IsDelete == "failed" { - eventMsg = "Shard Deletion failed. Manual intervention required. Requeuing" + } else if currState == string(databasev4.DeletingState) { + eventMsg = "Shard Deletion in progress for [" + OraShardSpex.Name + "]. Requeuing" err = fmt.Errorf(eventMsg) - break - } else if currState == string(databasev1alpha1.DeleteErrorState) { - eventMsg = "Shard Deletion Error. Manual intervention required. Requeuing" + err = nil + } else if currState == string(databasev4.DeleteErrorState) { + eventMsg = "Shard Deletion Error for [" + OraShardSpex.Name + "]. Manual intervention required. Requeuing" err = fmt.Errorf(eventMsg) - break - } else if currState == string(databasev1alpha1.ShardRemoveError) { - eventMsg = "Shard Deletion Error. Manual intervention required. Requeuing" + } else if currState == string(databasev4.ShardRemoveError) { + eventMsg = "Shard Deletion Error for [" + OraShardSpex.Name + "]. Manual intervention required. 
Requeuing" err = fmt.Errorf(eventMsg) - break } else { - eventMsg = "checkShardState() : Shard State=[" + currState + "]" + eventMsg = "checkShardState() : Shard State[" + OraShardSpex.Name + "]=[" + currState + "]" shardingv1.LogMessages("INFO", eventMsg, nil, instance, r.Log) err = nil } + r.publishEvents(instance, eventMsg, currState) } - r.publishEvents(instance, eventMsg, currState) } return err } diff --git a/controllers/database/singleinstancedatabase_controller.go b/controllers/database/singleinstancedatabase_controller.go index a20fa1fd..13f2ec6f 100644 --- a/controllers/database/singleinstancedatabase_controller.go +++ b/controllers/database/singleinstancedatabase_controller.go @@ -46,7 +46,7 @@ import ( "strings" "time" - dbapi "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" dbcommons "github.com/oracle/oracle-database-operator/commons/database" "golang.org/x/text/cases" "golang.org/x/text/language" @@ -90,6 +90,11 @@ const singleInstanceDatabaseFinalizer = "database.oracle.com/singleinstancedatab var oemExpressUrl string +var ErrNotPhysicalStandby error = errors.New("database not in PHYSICAL_STANDBY role") +var ErrDBNotConfiguredWithDG error = errors.New("database is not configured with a dataguard configuration") +var ErrFSFOEnabledForDGConfig error = errors.New("database is configured with dataguard and FSFO enabled") +var ErrAdminPasswordSecretNotFound error = errors.New("Admin password secret for the database not found") + //+kubebuilder:rbac:groups=database.oracle.com,resources=singleinstancedatabases,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=database.oracle.com,resources=singleinstancedatabases/status,verbs=get;update;patch //+kubebuilder:rbac:groups=database.oracle.com,resources=singleinstancedatabases/finalizers,verbs=update @@ -220,7 +225,6 @@ func (r *SingleInstanceDatabaseReconciler) Reconcile(ctx context.Context, req ct 
sidbRole, err := dbcommons.GetDatabaseRole(readyPod, r, r.Config, ctx, req) if sidbRole == "PRIMARY" { - // Update DB config result, err = r.updateDBConfig(singleInstanceDatabase, readyPod, ctx, req) if result.Requeue { @@ -243,8 +247,7 @@ func (r *SingleInstanceDatabaseReconciler) Reconcile(ctx context.Context, req ct } } else { - // Database is in role of standby - if !singleInstanceDatabase.Status.DgBrokerConfigured { + if singleInstanceDatabase.Status.DgBroker == nil { err = SetupStandbyDatabase(r, singleInstanceDatabase, referredPrimaryDatabase, ctx, req) if err != nil { return requeueY, err @@ -280,6 +283,17 @@ func (r *SingleInstanceDatabaseReconciler) Reconcile(ctx context.Context, req ct } + // manage snapshot database creation + if singleInstanceDatabase.Spec.ConvertToSnapshotStandby != singleInstanceDatabase.Status.ConvertToSnapshotStandby { + result, err := r.manageConvPhysicalToSnapshot(ctx, req) + if err != nil { + return requeueN, err + } + if result.Requeue { + return requeueY, nil + } + } + // Run Datapatch if strings.ToUpper(singleInstanceDatabase.Status.Role) == "PRIMARY" && singleInstanceDatabase.Status.DatafilesPatched != "true" { // add a blocking reconcile condition @@ -293,7 +307,7 @@ func (r *SingleInstanceDatabaseReconciler) Reconcile(ctx context.Context, req ct } } - // If LoadBalancer = true , ensure Connect String is updated + // This is to ensure that in case of LoadBalancer services the, the Load Balancer is ready to serve the requests if singleInstanceDatabase.Status.ConnectString == dbcommons.ValueUnavailable { r.Log.Info("Connect string not available for the database " + singleInstanceDatabase.Name) return requeueY, nil @@ -466,8 +480,20 @@ func (r *SingleInstanceDatabaseReconciler) validate(m *dbapi.SingleInstanceDatab m.Status.Pdbname = m.Spec.Pdbname m.Status.Persistence = m.Spec.Persistence m.Status.PrebuiltDB = m.Spec.Image.PrebuiltDB - + if m.Spec.CreateAs == "truecache" { + // Fetch the Primary database reference, required 
for all iterations + err = r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: m.Spec.PrimaryDatabaseRef}, rp) + if err != nil { + if apierrors.IsNotFound(err) { + r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, err.Error()) + r.Log.Info(err.Error()) + return requeueN, err + } + return requeueY, err + } + } if m.Spec.CreateAs == "clone" { + // Once a clone database has created , it has no link with its reference if m.Status.DatafilesCreated == "true" || !dbcommons.IsSourceDatabaseOnCluster(m.Spec.PrimaryDatabaseRef) { @@ -904,6 +930,49 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns return mounts }(), Env: func() []corev1.EnvVar { + if m.Spec.CreateAs == "truecache" { + return []corev1.EnvVar{ + { + Name: "SVC_HOST", + Value: m.Name, + }, + { + Name: "SVC_PORT", + Value: strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)), + }, + { + Name: "ORACLE_CHARACTERSET", + Value: m.Spec.Charset, + }, + { + Name: "ORACLE_EDITION", + Value: m.Spec.Edition, + }, + { + Name: "TRUE_CACHE", + Value: "true", + }, + { + Name: "PRIMARY_DB_CONN_STR", + Value: func() string { + if dbcommons.IsSourceDatabaseOnCluster(m.Spec.PrimaryDatabaseRef) { + return rp.Name + ":" + strconv.Itoa(int(dbcommons.CONTAINER_LISTENER_PORT)) + "/" + rp.Spec.Sid + } + return m.Spec.PrimaryDatabaseRef + }(), + }, + { + Name: "PDB_TC_SVCS", + Value: func() string { + return strings.Join(m.Spec.TrueCacheServices, ";") + }(), + }, + { + Name: "ORACLE_HOSTNAME", + Value: m.Name, + }, + } + } // adding XE support, useful for dev/test/CI-CD if m.Spec.Edition == "express" || m.Spec.Edition == "free" { return []corev1.EnvVar{ @@ -1098,35 +1167,27 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns }(), Resources: func() corev1.ResourceRequirements { - if m.Spec.Resources.Requests != nil && m.Spec.Resources.Limits != nil { - return corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - "cpu": 
resource.MustParse(m.Spec.Resources.Requests.Cpu), - "memory": resource.MustParse(m.Spec.Resources.Requests.Memory), - }, - Limits: corev1.ResourceList{ - "cpu": resource.MustParse(m.Spec.Resources.Limits.Cpu), - "memory": resource.MustParse(m.Spec.Resources.Requests.Memory), - }, - } - } else if m.Spec.Resources.Requests != nil { - return corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - "cpu": resource.MustParse(m.Spec.Resources.Requests.Cpu), - "memory": resource.MustParse(m.Spec.Resources.Requests.Memory), - }, - } - } else if m.Spec.Resources.Limits != nil { - return corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - "cpu": resource.MustParse(m.Spec.Resources.Limits.Cpu), - "memory": resource.MustParse(m.Spec.Resources.Requests.Memory), - }, - } - } else { - return corev1.ResourceRequirements{} + var resourceReqRequests corev1.ResourceList = corev1.ResourceList{} + var resourceReqLimits corev1.ResourceList = corev1.ResourceList{} + + if m.Spec.Resources.Requests != nil && m.Spec.Resources.Requests.Cpu != "" { + resourceReqRequests["cpu"] = resource.MustParse(m.Spec.Resources.Requests.Cpu) + } + if m.Spec.Resources.Requests != nil && m.Spec.Resources.Requests.Memory != "" { + resourceReqRequests["memory"] = resource.MustParse(m.Spec.Resources.Requests.Memory) + } + + if m.Spec.Resources.Limits != nil && m.Spec.Resources.Limits.Cpu != "" { + resourceReqLimits["cpu"] = resource.MustParse(m.Spec.Resources.Limits.Cpu) + } + if m.Spec.Resources.Limits != nil && m.Spec.Resources.Limits.Memory != "" { + resourceReqLimits["memory"] = resource.MustParse(m.Spec.Resources.Limits.Memory) } + return corev1.ResourceRequirements{ + Requests: resourceReqRequests, + Limits: resourceReqLimits, + } }(), }}, @@ -1205,39 +1266,35 @@ func (r *SingleInstanceDatabaseReconciler) instantiatePodSpec(m *dbapi.SingleIns // // ############################################################################# func (r *SingleInstanceDatabaseReconciler) instantiateSVCSpec(m 
*dbapi.SingleInstanceDatabase, - svcName string, ports []corev1.ServicePort, svcType corev1.ServiceType) *corev1.Service { - svc := &corev1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: svcName, - Namespace: m.Namespace, - Labels: map[string]string{ + svcName string, ports []corev1.ServicePort, svcType corev1.ServiceType, publishNotReadyAddress bool) *corev1.Service { + svc := dbcommons.NewRealServiceBuilder(). + SetName(svcName). + SetNamespace(m.Namespace). + SetLabels(func() map[string]string { + return map[string]string{ "app": m.Name, - }, - Annotations: func() map[string]string { - annotations := make(map[string]string) - if len(m.Spec.ServiceAnnotations) != 0 { - for key, value := range m.Spec.ServiceAnnotations { - annotations[key] = value - } + } + }()). + SetAnnotation(func() map[string]string { + annotations := make(map[string]string) + if len(m.Spec.ServiceAnnotations) != 0 { + for key, value := range m.Spec.ServiceAnnotations { + annotations[key] = value } - return annotations - }(), - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{}, - Selector: map[string]string{ + } + return annotations + }()). + SetPorts(ports). + SetSelector(func() map[string]string { + return map[string]string{ "app": m.Name, - }, - Type: svcType, - }, - } - svc.Spec.Ports = ports - // Set SingleInstanceDatabase instance as the owner and controller - ctrl.SetControllerReference(m, svc, r.Scheme) - return svc + } + }()). + SetPublishNotReadyAddresses(publishNotReadyAddress). + SetType(svcType). 
+ Build() + ctrl.SetControllerReference(m, &svc, r.Scheme) + return &svc } // ############################################################################# @@ -1572,7 +1629,7 @@ func (r *SingleInstanceDatabaseReconciler) createOrReplaceSVC(ctx context.Contex if getClusterSvcErr != nil && apierrors.IsNotFound(getClusterSvcErr) { // Create a new ClusterIP service ports := []corev1.ServicePort{{Name: "listener", Port: dbcommons.CONTAINER_LISTENER_PORT, Protocol: corev1.ProtocolTCP}} - svc := r.instantiateSVCSpec(m, clusterSvcName, ports, corev1.ServiceType("ClusterIP")) + svc := r.instantiateSVCSpec(m, clusterSvcName, ports, corev1.ServiceType("ClusterIP"), true) log.Info("Creating a new service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) err := r.Create(ctx, svc) if err != nil { @@ -1774,7 +1831,7 @@ func (r *SingleInstanceDatabaseReconciler) createOrReplaceSVC(ctx context.Contex } // Create the service - svc := r.instantiateSVCSpec(m, extSvcName, ports, extSvcType) + svc := r.instantiateSVCSpec(m, extSvcName, ports, extSvcType, false) log.Info("Creating a new service", "Service.Namespace", svc.Namespace, "Service.Name", svc.Name) err := r.Create(ctx, svc) if err != nil { @@ -3129,7 +3186,7 @@ func (r *SingleInstanceDatabaseReconciler) cleanupSingleInstanceDatabase(req ctr return requeueY, nil } - if m.Status.DgBrokerConfigured { + if m.Status.DgBroker != nil { eventReason := "Cannot Delete" eventMsg := "database cannot be deleted as it is present in a DataGuard Broker configuration" r.Recorder.Eventf(m, corev1.EventTypeWarning, eventReason, eventMsg) @@ -3163,6 +3220,198 @@ func (r *SingleInstanceDatabaseReconciler) cleanupSingleInstanceDatabase(req ctr return requeueN, nil } +// ############################################################################################# +// +// Manage conversion of singleinstancedatabase from PHYSICAL_STANDBY To SNAPSHOT_STANDBY +// +// 
############################################################################################# +func (r *SingleInstanceDatabaseReconciler) manageConvPhysicalToSnapshot(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("manageConvPhysicalToSnapshot", req.NamespacedName) + var singleInstanceDatabase dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Name}, &singleInstanceDatabase); err != nil { + if apierrors.IsNotFound(err) { + log.Info("requested resource not found") + return requeueY, nil + } + log.Error(err, err.Error()) + return requeueY, err + } + + sidbReadyPod, err := GetDatabaseReadyPod(r, &singleInstanceDatabase, ctx, req) + if err != nil { + return requeueY, err + } + if sidbReadyPod.Name == "" { + log.Info("No ready Pod for the requested singleinstancedatabase") + return requeueY, nil + } + + if singleInstanceDatabase.Spec.ConvertToSnapshotStandby { + // Convert a PHYSICAL_STANDBY -> SNAPSHOT_STANDBY + singleInstanceDatabase.Status.Status = dbcommons.StatusUpdating + r.Status().Update(ctx, &singleInstanceDatabase) + if err := convertPhysicalStdToSnapshotStdDB(r, &singleInstanceDatabase, &sidbReadyPod, ctx, req); err != nil { + switch err { + case ErrNotPhysicalStandby: + r.Recorder.Event(&singleInstanceDatabase, corev1.EventTypeWarning, "Conversion to Snapshot Standby Not allowed", "Database not in physical standby role") + log.Info("Conversion to Snapshot Standby not allowed as database not in physical standby role") + return requeueY, nil + case ErrDBNotConfiguredWithDG: + // cannot convert to snapshot database + r.Recorder.Event(&singleInstanceDatabase, corev1.EventTypeWarning, "Conversion to Snapshot Standby Not allowed", "Database is not configured with dataguard") + log.Info("Conversion to Snapshot Standby not allowed as requested database is not configured with dataguard") + return requeueY, nil + case ErrFSFOEnabledForDGConfig: + 
r.Recorder.Event(&singleInstanceDatabase, corev1.EventTypeWarning, "Conversion to Snapshot Standby Not allowed", "Database is a FastStartFailover target") + log.Info("Conversion to Snapshot Standby Not allowed as database is a FastStartFailover target") + return requeueY, nil + case ErrAdminPasswordSecretNotFound: + r.Recorder.Event(&singleInstanceDatabase, corev1.EventTypeWarning, "Admin Password", "Database admin password secret not found") + log.Info("Database admin password secret not found") + return requeueY, nil + default: + log.Error(err, err.Error()) + return requeueY, nil + } + } + log.Info(fmt.Sprintf("Database %s converted to snapshot standby", singleInstanceDatabase.Name)) + singleInstanceDatabase.Status.ConvertToSnapshotStandby = true + singleInstanceDatabase.Status.Status = dbcommons.StatusReady + // Get database role and update the status + sidbRole, err := dbcommons.GetDatabaseRole(sidbReadyPod, r, r.Config, ctx, req) + if err != nil { + return requeueN, err + } + log.Info("Database "+singleInstanceDatabase.Name, "Database Role : ", sidbRole) + singleInstanceDatabase.Status.Role = sidbRole + r.Status().Update(ctx, &singleInstanceDatabase) + } else { + // Convert a SNAPSHOT_STANDBY -> PHYSICAL_STANDBY + singleInstanceDatabase.Status.Status = dbcommons.StatusUpdating + r.Status().Update(ctx, &singleInstanceDatabase) + if err := convertSnapshotStdToPhysicalStdDB(r, &singleInstanceDatabase, &sidbReadyPod, ctx, req); err != nil { + switch err { + default: + r.Log.Error(err, err.Error()) + return requeueY, nil + } + } + singleInstanceDatabase.Status.ConvertToSnapshotStandby = false + singleInstanceDatabase.Status.Status = dbcommons.StatusReady + // Get database role and update the status + sidbRole, err := dbcommons.GetDatabaseRole(sidbReadyPod, r, r.Config, ctx, req) + if err != nil { + return requeueN, err + } + log.Info("Database "+singleInstanceDatabase.Name, "Database Role : ", sidbRole) + singleInstanceDatabase.Status.Role = sidbRole + 
r.Status().Update(ctx, &singleInstanceDatabase) + } + + return requeueN, nil +} + +func convertPhysicalStdToSnapshotStdDB(r *SingleInstanceDatabaseReconciler, singleInstanceDatabase *dbapi.SingleInstanceDatabase, sidbReadyPod *corev1.Pod, ctx context.Context, req ctrl.Request) error { + log := r.Log.WithValues("convertPhysicalStdToSnapshotStdDB", req.NamespacedName) + log.Info(fmt.Sprintf("Checking the role %s database i.e %s", singleInstanceDatabase.Name, singleInstanceDatabase.Status.Role)) + if singleInstanceDatabase.Status.Role != "PHYSICAL_STANDBY" { + return ErrNotPhysicalStandby + } + + var dataguardBroker dbapi.DataguardBroker + log.Info(fmt.Sprintf("Checking if the database %s is configured with dgbroker or not ?", singleInstanceDatabase.Name)) + if singleInstanceDatabase.Status.DgBroker != nil { + if err := r.Get(ctx, types.NamespacedName{Namespace: singleInstanceDatabase.Namespace, Name: *singleInstanceDatabase.Status.DgBroker}, &dataguardBroker); err != nil { + if apierrors.IsNotFound(err) { + log.Info("Resource not found") + return errors.New("Dataguardbroker resource not found") + } + return err + } + log.Info(fmt.Sprintf("database %s is configured with dgbroker %s", singleInstanceDatabase.Name, *singleInstanceDatabase.Status.DgBroker)) + if fastStartFailoverStatus, _ := strconv.ParseBool(dataguardBroker.Status.FastStartFailover); fastStartFailoverStatus { + // not allowed to convert to snapshot standby + return ErrFSFOEnabledForDGConfig + } + } else { + // cannot convert to snapshot database + return ErrDBNotConfiguredWithDG + } + + // get singleinstancedatabase ready pod + // execute the dgmgrl command for conversion to snapshot database + // Exception handling + // Get Admin password for current primary database + var adminPasswordSecret corev1.Secret + if err := r.Get(context.TODO(), types.NamespacedName{Name: singleInstanceDatabase.Spec.AdminPassword.SecretName, Namespace: singleInstanceDatabase.Namespace}, &adminPasswordSecret); err != nil { + 
return err + } + var adminPassword string = string(adminPasswordSecret.Data[singleInstanceDatabase.Spec.AdminPassword.SecretKey]) + + // Connect to 'primarySid' db using dgmgrl and switchover to 'targetSidbSid' db to make 'targetSidbSid' db primary + if _, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)); err != nil { + return err + } + + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf("dgmgrl sys@%s \"convert database %s to snapshot standby;\" < admin.pwd", dataguardBroker.Status.PrimaryDatabase, singleInstanceDatabase.Status.Sid)) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Convert to snapshot standby command output \n %s", out)) + + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf("echo -e \"alter pluggable database %s open;\" | %s", singleInstanceDatabase.Status.Pdbname, dbcommons.SQLPlusCLI)) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Open pluggable databases output \n %s", out)) + + return nil +} + +func convertSnapshotStdToPhysicalStdDB(r *SingleInstanceDatabaseReconciler, singleInstanceDatabase *dbapi.SingleInstanceDatabase, sidbReadyPod *corev1.Pod, ctx context.Context, req ctrl.Request) error { + log := r.Log.WithValues("convertSnapshotStdToPhysicalStdDB", req.NamespacedName) + + var dataguardBroker dbapi.DataguardBroker + if err := r.Get(ctx, types.NamespacedName{Namespace: singleInstanceDatabase.Namespace, Name: *singleInstanceDatabase.Status.DgBroker}, &dataguardBroker); err != nil { + if apierrors.IsNotFound(err) { + return errors.New("dataguardbroker resource not found") + } + return err + } + + var adminPasswordSecret corev1.Secret + if err := r.Get(context.TODO(), types.NamespacedName{Name: 
singleInstanceDatabase.Spec.AdminPassword.SecretName, Namespace: singleInstanceDatabase.Namespace}, &adminPasswordSecret); err != nil { + if apierrors.IsNotFound(err) { + return ErrAdminPasswordSecretNotFound + } + return err + } + var adminPassword string = string(adminPasswordSecret.Data[singleInstanceDatabase.Spec.AdminPassword.SecretKey]) + + // Connect to 'primarySid' db using dgmgrl and switchover to 'targetSidbSid' db to make 'targetSidbSid' db primary + _, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)) + if err != nil { + return err + } + log.Info("Converting snapshot standby to physical standby") + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf("dgmgrl sys@%s \"convert database %s to physical standby;\" < admin.pwd", dataguardBroker.Status.PrimaryDatabase, singleInstanceDatabase.Status.Sid)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info(fmt.Sprintf("Database %s converted to physical standby \n %s", singleInstanceDatabase.Name, out)) + log.Info("opening the PDB for the database") + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", fmt.Sprintf("echo -e \"alter pluggable database %s open;\" | %s", singleInstanceDatabase.Status.Pdbname, dbcommons.SQLPlusCLI)) + if err != nil { + r.Log.Error(err, err.Error()) + return err + } + log.Info(fmt.Sprintf("PDB open command output %s", out)) + + return nil +} + // ############################################################################# // // SetupWithManager sets up the controller with the Manager diff --git a/controllers/dataguard/datagauard_errors.go b/controllers/dataguard/datagauard_errors.go new file mode 100644 index 00000000..94b2b0ea --- /dev/null +++ 
b/controllers/dataguard/datagauard_errors.go @@ -0,0 +1,47 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "errors" +) + +var ErrSidbWithMutipleReplicas error = errors.New("SingleInstanceDatabase with multiple replicas is not supported") +var ErrCurrentPrimaryDatabaseNotReady error = errors.New("current primary database not ready") +var ErrCurrentPrimaryDatabaseNotFound error = errors.New("current primary database not found") diff --git a/controllers/dataguard/dataguard_utils.go b/controllers/dataguard/dataguard_utils.go new file mode 100644 index 00000000..4c16f82b --- /dev/null +++ b/controllers/dataguard/dataguard_utils.go @@ -0,0 +1,1061 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and 
the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package controllers + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "time" + + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" +) + +// ############################################################################################################### +// +// Clean up necessary resources required prior to dataguardbroker resource deletion +// +// ############################################################################################################### +func cleanupDataguardBroker(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, req ctrl.Request, ctx context.Context) error { + log := ctrllog.FromContext(ctx).WithValues("cleanupDataguardBroker", req.NamespacedName) + + log.Info(fmt.Sprintf("Cleaning for dataguard broker %v deletion", broker.Name)) + + // Fetch Primary Database Reference + var sidb 
dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: broker.GetCurrentPrimaryDatabase()}, &sidb); err != nil { + if apierrors.IsNotFound(err) { + log.Info(fmt.Sprintf("SingleInstanceDatabase %s deleted.", broker.GetCurrentPrimaryDatabase())) + return err + } + return err + } + + log.Info(fmt.Sprintf("The current primary database is %v", sidb.Name)) + + // Validate if Primary Database Reference is ready + if err := validateSidbReadiness(r, broker, &sidb, ctx, req); err != nil { + log.Info("Reconcile queued") + return err + } + + log.Info(fmt.Sprintf("The current primary database %v is ready and healthy", sidb.Name)) + + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + log.Info(fmt.Sprintf("Ready pod for the sidb %v is %v", sidb.Name, sidbReadyPod.Name)) + + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba ", dbcommons.RemoveDataguardConfiguration)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("RemoveDataguardConfiguration Output") + log.Info(out) + + for _, databaseRef := range broker.Status.DatabasesInDataguardConfig { + + var standbyDatabase dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: databaseRef}, &standbyDatabase); err != nil { + if apierrors.IsNotFound(err) { + continue + } + log.Error(err, err.Error()) + return err + } + + // Set DgBrokerConfigured to false + standbyDatabase.Status.DgBroker = nil + if err := r.Status().Update(ctx, &standbyDatabase); err != nil { + r.Recorder.Eventf(&standbyDatabase, corev1.EventTypeWarning, "Updating Status", "DgBrokerConfigured status updation failed") + log.Info(fmt.Sprintf("Status updation for sidb 
%s failed", standbyDatabase.Name)) + return err + } + } + + log.Info("Successfully cleaned up Dataguard Broker") + return nil +} + +// ##################################################################################################### +// +// Validate readiness of the primary singleinstancedatabase specified +// +// ##################################################################################################### +func validateSidbReadiness(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, sidb *dbapi.SingleInstanceDatabase, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("validateSidbReadiness", req.NamespacedName) + + var adminPassword string + var sidbReadyPod corev1.Pod + + // Check if current primary singleinstancedatabase is "ready" + if sidb.Status.Status != dbcommons.StatusReady { + return ErrCurrentPrimaryDatabaseNotReady + } + + // ## FETCH THE SIDB REPLICAS . + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + if sidbReadyPod.Name == "" { + log.Info("No ready pod avail for the singleinstancedatabase") + return ErrCurrentPrimaryDatabaseNotReady + } + + log.Info(fmt.Sprintf("Ready pod for the singleInstanceDatabase %s is %s", sidb.Name, sidbReadyPod.Name)) + + // Validate databaseRef Admin Password + var adminPasswordSecret corev1.Secret + err = r.Get(ctx, types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret) + if err != nil { + if apierrors.IsNotFound(err) { + //m.Status.Status = dbcommons.StatusError + eventReason := "Waiting" + eventMsg := "waiting for : " + sidb.Spec.AdminPassword.SecretName + " to get created" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + r.Log.Info("Secret " + sidb.Spec.AdminPassword.SecretName + " Not Found") + return fmt.Errorf("adminPassword 
secret for singleinstancedatabase %v not found", sidb.Name) + } + log.Error(err, err.Error()) + return err + } + adminPassword = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | %s", fmt.Sprintf(dbcommons.ValidateAdminPassword, adminPassword), dbcommons.GetSqlClient(sidb.Spec.Edition))) + if err != nil { + fastStartFailoverStatus, _ := strconv.ParseBool(broker.Status.FastStartFailover) + if strings.Contains(err.Error(), "dialing backend") && broker.Status.Status == dbcommons.StatusReady && fastStartFailoverStatus { + // Connection to the pod is failing after broker came up and running + // Might suggest disconnect or pod/vm going down + log.Info("Dialing connection error") + if err := updateReconcileStatus(r, broker, ctx, req); err != nil { + return err + } + } + log.Error(err, err.Error()) + return err + } + + if strings.Contains(out, "USER is \"SYS\"") { + log.Info("validated Admin password successfully") + } else if strings.Contains(out, "ORA-01017") { + //m.Status.Status = dbcommons.StatusError + eventReason := "Logon denied" + eventMsg := "invalid databaseRef admin password. 
secret: " + sidb.Spec.AdminPassword.SecretName + r.Recorder.Eventf(broker, corev1.EventTypeWarning, eventReason, eventMsg) + return fmt.Errorf("logon denied for singleinstancedatabase %v", sidb.Name) + } else { + return fmt.Errorf("%v", out) + } + + return nil +} + +// ############################################################################# +// +// Setup the requested dataguard Configuration +// +// ############################################################################# +func setupDataguardBrokerConfiguration(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, sidb *dbapi.SingleInstanceDatabase, + ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("setupDataguardBrokerConfiguration", req.NamespacedName) + + // Get sidb ready pod for current primary database + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + log.Info(fmt.Sprintf("broker.Spec.StandbyDatabaseRefs are %v", broker.Spec.StandbyDatabaseRefs)) + + for _, database := range broker.Spec.StandbyDatabaseRefs { + + log.Info(fmt.Sprintf("adding database %v", database)) + + // Get the standby database resource + var standbyDatabase dbapi.SingleInstanceDatabase + err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: database}, &standbyDatabase) + if err != nil { + if apierrors.IsNotFound(err) { + eventReason := "Warning" + eventMsg := database + "not found" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + continue + } + log.Error(err, err.Error()) + return err + } + + // validate standby database status + if standbyDatabase.Status.Status != dbcommons.StatusReady { + eventReason := "Waiting" + eventMsg := "Waiting for " + standbyDatabase.Name + " to be Ready" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + log.Info(fmt.Sprintf("single 
instance database %s not ready yet", standbyDatabase.Name)) + continue + } + + // Check if dataguard broker is already configured for the standby database + if standbyDatabase.Status.DgBroker != nil { + log.Info("Dataguard broker for standbyDatabase : " + standbyDatabase.Name + " is already configured") + continue + } + + // Check if dataguard broker already has a database with the same SID + _, ok := broker.Status.DatabasesInDataguardConfig[strings.ToUpper(standbyDatabase.Status.Sid)] + if ok { + log.Info("A database with the same SID is already configured in the DG") + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Spec Error", "A database with the same SID "+standbyDatabase.Status.Sid+" is already configured in the DG") + continue + } + + broker.Status.Status = dbcommons.StatusCreating + r.Status().Update(ctx, broker) + + // ## FETCH THE STANDBY REPLICAS . + standbyDatabaseReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, standbyDatabase.Name, standbyDatabase.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + var adminPasswordSecret corev1.Secret + if err := r.Get(ctx, types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret); err != nil { + return err + } + var adminPassword string = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + if err := setupDataguardBrokerConfigurationForGivenDB(r, broker, sidb, &standbyDatabase, standbyDatabaseReadyPod, sidbReadyPod, ctx, req, adminPassword); err != nil { + log.Error(err, fmt.Sprintf(" Error while setting up DG broker for the Database %v:%v", standbyDatabase.Status.Sid, standbyDatabase.Name)) + return err + } + if len(broker.Status.DatabasesInDataguardConfig) == 0 { + log.Info("DatabasesInDataguardConfig is nil") + broker.Status.DatabasesInDataguardConfig = make(map[string]string) + } + log.Info(fmt.Sprintf("adding %v:%v to the map", 
standbyDatabase.Status.Sid, standbyDatabase.Name)) + broker.Status.DatabasesInDataguardConfig[standbyDatabase.Status.Sid] = standbyDatabase.Name + r.Status().Update(ctx, broker) + // Update Databases + } + if len(broker.Status.DatabasesInDataguardConfig) == 0 { + broker.Status.DatabasesInDataguardConfig = make(map[string]string) + } + log.Info(fmt.Sprintf("adding primary database %v:%v to the map", sidb.Status.Sid, sidb.Name)) + broker.Status.DatabasesInDataguardConfig[sidb.Status.Sid] = sidb.Name + + eventReason := "DG Configuration up to date" + eventMsg := "" + + // Patch DataguardBroker Service to point selector to Current Primary Name + if err := patchService(r, broker, ctx, req); err != nil { + log.Error(err, err.Error()) + return err + } + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + + return nil +} + +// ############################################################################# +// +// Set up dataguard Configuration for a given StandbyDatabase +// +// ############################################################################# +func setupDataguardBrokerConfigurationForGivenDB(r *DataguardBrokerReconciler, m *dbapi.DataguardBroker, n *dbapi.SingleInstanceDatabase, standbyDatabase *dbapi.SingleInstanceDatabase, + standbyDatabaseReadyPod corev1.Pod, sidbReadyPod corev1.Pod, ctx context.Context, req ctrl.Request, adminPassword string) error { + + log := r.Log.WithValues("setupDataguardBrokerConfigurationForGivenDB", req.NamespacedName) + + if standbyDatabaseReadyPod.Name == "" || sidbReadyPod.Name == "" { + return errors.New("no ready Pod for the singleinstancedatabase") + } + + // ## CHECK IF DG CONFIGURATION AVAILABLE IN PRIMARY DATABSE## + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba ", dbcommons.DBShowConfigCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + 
log.Info("ShowConfiguration Output") + log.Info(out) + + if strings.Contains(out, "ORA-16525") { + log.Info("ORA-16525: The Oracle Data Guard broker is not yet available on Primary") + return fmt.Errorf("ORA-16525: The Oracle Data Guard broker is not yet available on Primary database %v", n.Name) + } + + _, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DB Admin pwd file created") + + // ORA-16532: Oracle Data Guard broker configuration does not exist , so create one + if strings.Contains(out, "ORA-16532") { + if m.Spec.ProtectionMode == "MaxPerformance" { + // Construct the password file and dgbroker command file + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerMaxPerformanceCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DGMGRL command file creation output") + log.Info(out) + + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAXPERFORMANCE ## + out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + "dgmgrl sys@${PRIMARY_DB_CONN_STR} @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd") + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DgConfigurationMaxPerformance Output") + log.Info(out) + } else if m.Spec.ProtectionMode == "MaxAvailability" { + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAX AVAILABILITY ## + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, 
dbcommons.DataguardBrokerMaxAvailabilityCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DGMGRL command file creation output") + log.Info(out) + + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAXPERFORMANCE ## + out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + "dgmgrl sys@${PRIMARY_DB_CONN_STR} @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd") + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DgConfigurationMaxAvailability Output") + log.Info(out) + } else { + log.Info("SPECIFY correct Protection Mode . Either MaxAvailability or MaxPerformance") + return err + } + + // ## SHOW CONFIGURATION DG + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | dgmgrl / as sysdba ", dbcommons.DBShowConfigCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } else { + log.Info("ShowConfiguration Output") + log.Info(out) + } + // Set DG Configured status to true for this standbyDatabase and primary Database. so that in next reconcilation, we dont configure this again + n.Status.DgBroker = &m.Name + standbyDatabase.Status.DgBroker = &m.Name + r.Status().Update(ctx, standbyDatabase) + r.Status().Update(ctx, n) + // Remove admin pwd file + _, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + dbcommons.RemoveAdminPasswordFile) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DB Admin pwd file removed") + + return err + } + + // DG Configuration Exists . 
So add the standbyDatabase to the existing DG Configuration + databases, err := GetDatabasesInDataGuardConfigurationWithRole(r, m, ctx, req) + if err != nil { + log.Info("Error while setting up the dataguard configuration") + log.Error(err, err.Error()) + return err + } + + // ## ADD DATABASE TO DG CONFIG , IF NOT PRESENT + found, _ := dbcommons.IsDatabaseFound(standbyDatabase.Spec.Sid, databases, "") + if found { + return err + } + primarySid := dbcommons.GetPrimaryDatabase(databases) + + // If user adds a new standby to a dg config when failover happened to one ot the standbys, we need to have current primary connect string + primaryConnectString := n.Name + ":1521/" + primarySid + if !strings.EqualFold(primarySid, n.Spec.Sid) { + primaryConnectString = m.Status.DatabasesInDataguardConfig[strings.ToUpper(primarySid)] + ":1521/" + primarySid + } + + if m.Spec.ProtectionMode == "MaxPerformance" { + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAXPERFORMANCE ## + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerAddDBMaxPerformanceCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DGMGRL command file creation output") + log.Info(out) + + out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("dgmgrl sys@%s @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd ", primaryConnectString)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DgConfigurationMaxPerformance Output") + log.Info(out) + + } else if m.Spec.ProtectionMode == "MaxAvailability" { + // ## DG CONFIGURATION FOR PRIMARY DB || MODE : MAX AVAILABILITY ## + out, err := dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, 
false, "bash", "-c", + fmt.Sprintf(dbcommons.CreateDGMGRLScriptFile, dbcommons.DataguardBrokerAddDBMaxAvailabilityCMD)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DGMGRL command file creation output") + log.Info(out) + + out, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("dgmgrl sys@%s @dgmgrl.cmd < admin.pwd && rm -rf dgmgrl.cmd ", primaryConnectString)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DgConfigurationMaxAvailability Output") + log.Info(out) + + } else { + log.Info("SPECIFY correct Protection Mode . Either MaxAvailability or MaxPerformance") + log.Error(err, err.Error()) + return err + } + + // Remove admin pwd file + _, err = dbcommons.ExecCommand(r, r.Config, standbyDatabaseReadyPod.Name, standbyDatabaseReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + dbcommons.RemoveAdminPasswordFile) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("DB Admin pwd file removed") + + // Set DG Configured status to true for this standbyDatabase. 
so that in next reconcilation, we dont configure this again + standbyDatabase.Status.DgBroker = &m.Name + r.Status().Update(ctx, standbyDatabase) + + return nil +} + +// ########################################################################################################### +// +// Patch the service for dataguardbroker resource to point selector to current Primary Name +// +// ########################################################################################################### +func patchService(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + log := r.Log.WithValues("patchService", req.NamespacedName) + + primaryDatabaseRef := broker.Status.DatabasesInDataguardConfig[broker.Status.PrimaryDatabase] + var svc *corev1.Service = &corev1.Service{} + + // fetch the k8s service for the dataguardbroker resource + err := r.Get(ctx, types.NamespacedName{Name: req.Name, Namespace: req.Namespace}, svc) + if err != nil { + return err + } + + log.Info(fmt.Sprintf("Patching Service %s to point to the currPrimaryDatabase %s", svc.Name, primaryDatabaseRef)) + + // updating service selector for the primary database pod to attach itself to the service + svc.Spec.Selector["app"] = primaryDatabaseRef + if err = r.Update(ctx, svc); err != nil { + return err + } + log.Info(fmt.Sprintf("Patching service %s successful ", svc.Name)) + + // updating the dataguardbroker resource connect strings + broker.Status.ClusterConnectString = svc.Name + "." 
+ svc.Namespace + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/DATAGUARD" + if broker.Spec.LoadBalancer { + if len(svc.Status.LoadBalancer.Ingress) > 0 { + lbAddress := svc.Status.LoadBalancer.Ingress[0].Hostname + if lbAddress == "" { + lbAddress = svc.Status.LoadBalancer.Ingress[0].IP + } + broker.Status.ExternalConnectString = lbAddress + ":" + fmt.Sprint(svc.Spec.Ports[0].Port) + "/DATAGUARD" + } + } else { + nodeip := dbcommons.GetNodeIp(r, ctx, req) + if nodeip != "" { + broker.Status.ExternalConnectString = nodeip + ":" + fmt.Sprint(svc.Spec.Ports[0].NodePort) + "/DATAGUARD" + } + } + log.Info("Updated connect strings to the dataguard broker") + return nil +} + +// ########################################################################################################### +// +// Update Reconcile Status +// +// ########################################################################################################### +func updateReconcileStatus(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) (err error) { + + log := r.Log.WithValues("updateReconcileStatus", req.NamespacedName) + + // fetch the singleinstancedatabase (database sid) and their role in the dataguard configuration + var databases []string + databases, err = GetDatabasesInDataGuardConfigurationWithRole(r, broker, ctx, req) + if err != nil { + log.Info("Problem when retrieving the databases in dg config") + broker.Status.Status = dbcommons.StatusNotReady + r.Status().Update(ctx, broker) + return nil + } + + // loop over all the databases to update the status of the dataguardbroker and the singleinstancedatabase + var standbyDatabases string = "" + for i := 0; i < len(databases); i++ { + splitstr := strings.Split(databases[i], ":") + database := strings.ToUpper(splitstr[0]) + var singleInstanceDatabase dbapi.SingleInstanceDatabase + err := r.Get(ctx, types.NamespacedName{Name: broker.Status.DatabasesInDataguardConfig[database], Namespace: 
req.Namespace}, &singleInstanceDatabase) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Checking current role of %v is %v and its status is %v", broker.Status.DatabasesInDataguardConfig[database], strings.ToUpper(splitstr[1]), singleInstanceDatabase.Status.Role)) + if singleInstanceDatabase.Status.Role != strings.ToUpper(splitstr[1]) { + singleInstanceDatabase.Status.Role = strings.ToUpper(splitstr[1]) + r.Status().Update(ctx, &singleInstanceDatabase) + } + if strings.ToUpper(splitstr[1]) == "PRIMARY" && strings.ToUpper(database) != strings.ToUpper(broker.Status.PrimaryDatabase) { + log.Info("primary Database is " + strings.ToUpper(database)) + broker.Status.PrimaryDatabase = strings.ToUpper(database) + // patch the service with the current primary + } + if strings.ToUpper(splitstr[1]) == "PHYSICAL_STANDBY" { + if standbyDatabases != "" { + standbyDatabases += "," + strings.ToUpper(splitstr[0]) + } else { + standbyDatabases = strings.ToUpper(splitstr[0]) + } + } + } + + broker.Status.StandbyDatabases = standbyDatabases + broker.Status.ProtectionMode = broker.Spec.ProtectionMode + r.Status().Update(ctx, broker) + + // patch the dataguardbroker resource service + if err := patchService(r, broker, ctx, req); err != nil { + return err + } + + return nil +} + +// ##################################################################################################### +// +// Get the avail FSFO targets for a given singleinstancedatabase sid +// +// ##################################################################################################### +func GetFSFOTargets(databaseSid string, databasesInDgConfig map[string]string) (string, error) { + if _, ok := databasesInDgConfig[databaseSid]; !ok { + return "", fmt.Errorf("database %s not in dataguard config", databasesInDgConfig[databaseSid]) + } + var fsfoTarget []string + for dbSid, _ := range databasesInDgConfig { + if strings.Compare(databaseSid, dbSid) != 0 { + fsfoTarget = append(fsfoTarget, dbSid) + } + } 
+ return strings.Join(fsfoTarget, ","), nil +} + +// ##################################################################################################### +// +// Set faststartfailover targets accordingly to dataguard configuration +// +// ##################################################################################################### +func setFSFOTargets(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("setFSFOTargets", req.NamespacedName) + + // fetch the current primary singleinstancedatabase + var currentPrimaryDatabase dbapi.SingleInstanceDatabase + err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: broker.GetCurrentPrimaryDatabase()}, ¤tPrimaryDatabase) + if err != nil { + if apierrors.IsNotFound(err) { + r.Log.Info("Resource not found") + return nil + } + r.Log.Error(err, err.Error()) + return err + } + + log.Info(fmt.Sprintf("current primary database for the dg config is %s", currentPrimaryDatabase.Name)) + + // fetch the singleinstancedatabase ready pod + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, currentPrimaryDatabase.Spec.Image.Version, + currentPrimaryDatabase.Spec.Image.PullFrom, currentPrimaryDatabase.Name, currentPrimaryDatabase.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return fmt.Errorf("error while fetching ready pod for %s", currentPrimaryDatabase.Name) + } + + log.Info(fmt.Sprintf("current primary database ready pod is %s", sidbReadyPod.Name)) + + // fetch singleinstancedatabase admin password + var adminPasswordSecret corev1.Secret + if err = r.Get(ctx, types.NamespacedName{Name: currentPrimaryDatabase.Spec.AdminPassword.SecretName, Namespace: currentPrimaryDatabase.Namespace}, &adminPasswordSecret); err != nil { + if apierrors.IsNotFound(err) { + //m.Status.Status = dbcommons.StatusError + eventReason := "Waiting" + eventMsg := "waiting for : " + 
currentPrimaryDatabase.Spec.AdminPassword.SecretName + " to get created" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + r.Log.Info("Secret " + currentPrimaryDatabase.Spec.AdminPassword.SecretName + " Not Found") + return errors.New("admin password secret not found") + } + log.Error(err, err.Error()) + return err + } + adminPassword := string(adminPasswordSecret.Data[currentPrimaryDatabase.Spec.AdminPassword.SecretKey]) + + for databaseSid, databaseRef := range broker.Status.DatabasesInDataguardConfig { + // construct FSFO target for this database + fsfoTargets, err := GetFSFOTargets(databaseSid, broker.Status.DatabasesInDataguardConfig) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Setting fast start failover target for the database %s to %s", databaseRef, fsfoTargets)) + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"EDIT DATABASE %s SET PROPERTY FASTSTARTFAILOVERTARGET=%s \" | dgmgrl sys/%s@%s ", + databaseSid, fsfoTargets, adminPassword, currentPrimaryDatabase.Status.Sid)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("SETTING FSFO TARGET OUTPUT") + log.Info(out) + + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"SHOW DATABASE %s FASTSTARTFAILOVERTARGET \" | dgmgrl sys/%s@%s ", databaseSid, adminPassword, currentPrimaryDatabase.Status.Sid)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info("FSFO TARGETS OF " + databaseSid) + log.Info(out) + } + + // Set FSFO Targets according to the input yaml of broker + return nil +} + +// ############################################################################# +// +// Setup the requested dataguard configuration +// +// ############################################################################# +func createObserverPods(r 
*DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("createObserverPods", req.NamespacedName) + + // fetch the current primary singleinstancedatabase resourcce + var currPrimaryDatabase dbapi.SingleInstanceDatabase + namespacedName := types.NamespacedName{ + Namespace: broker.Namespace, + Name: broker.GetCurrentPrimaryDatabase(), + } + if err := r.Get(ctx, namespacedName, &currPrimaryDatabase); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", namespacedName.Name)) + r.Log.Info(fmt.Sprintf("singleinstancedatabase %s not found", namespacedName.Name)) + return ErrCurrentPrimaryDatabaseNotFound + } + return err + } + + // fetch the dataguardbroker observer replicas + _, brokerReplicasFound, _, _, err := dbcommons.FindPods(r, "", "", broker.Name, broker.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + if brokerReplicasFound > 0 { + return nil + } + + // Stop the already running observer + // find the avail pods for the currPrimaryDatabase + log.Info("Need to stop the observer if already running") + currPrimaryDatabaseReadyPod, _, _, _, err := dbcommons.FindPods(r, "", "", currPrimaryDatabase.Name, currPrimaryDatabase.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + if currPrimaryDatabaseReadyPod.Name == "" { + return errors.New("No ready pods avail ") + } + + // fetch singleinstancedatabase admin password + var adminPasswordSecret corev1.Secret + if err = r.Get(ctx, types.NamespacedName{Name: currPrimaryDatabase.Spec.AdminPassword.SecretName, Namespace: currPrimaryDatabase.Namespace}, &adminPasswordSecret); err != nil { + if apierrors.IsNotFound(err) { + //m.Status.Status = dbcommons.StatusError + eventReason := "Waiting" + 
eventMsg := "waiting for : " + currPrimaryDatabase.Spec.AdminPassword.SecretName + " to get created" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + r.Log.Info("Secret " + currPrimaryDatabase.Spec.AdminPassword.SecretName + " Not Found") + return errors.New("admin password secret not found") + } + log.Error(err, err.Error()) + return err + } + adminPassword := string(adminPasswordSecret.Data[currPrimaryDatabase.Spec.AdminPassword.SecretKey]) + + out, err := dbcommons.ExecCommand(r, r.Config, currPrimaryDatabaseReadyPod.Name, currPrimaryDatabaseReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \" STOP OBSERVER %s \" | dgmgrl sys/%s@%s ", broker.Name, adminPassword, currPrimaryDatabase.Status.Sid)) + if err != nil { + log.Error(err, err.Error()) + return err + } + log.Info(out) + // instantiate observer pod specification + pod := dbcommons.NewRealPodBuilder(). + SetNamespacedName(types.NamespacedName{ + Name: broker.Name + "-" + dbcommons.GenerateRandomString(5), + Namespace: broker.Namespace, + }). + SetLabels(map[string]string{ + "app": broker.Name, + "version": currPrimaryDatabase.Spec.Image.PullSecrets, + }). + SetTerminationGracePeriodSeconds(int64(30)). + SetNodeSelector(func() map[string]string { + var nsRule map[string]string = map[string]string{} + if len(broker.Spec.NodeSelector) != 0 { + for key, value := range broker.Spec.NodeSelector { + nsRule[key] = value + } + } + return nsRule + }()). + SetSecurityContext(corev1.PodSecurityContext{ + RunAsUser: func() *int64 { i := int64(54321); return &i }(), + FSGroup: func() *int64 { i := int64(54321); return &i }(), + }). + SetImagePullSecrets(currPrimaryDatabase.Spec.Image.PullSecrets). 
+ AppendContainers(corev1.Container{ + Name: broker.Name, + Image: currPrimaryDatabase.Spec.Image.PullFrom, + Lifecycle: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "/bin/echo -en 'shutdown abort;\n' | env ORACLE_SID=${ORACLE_SID^^} sqlplus -S / as sysdba"}, + }, + }, + }, + ImagePullPolicy: corev1.PullAlways, + Ports: []corev1.ContainerPort{{ContainerPort: 1521}, {ContainerPort: 5500}}, + + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "$ORACLE_BASE/checkDBLockStatus.sh"}, + }, + }, + InitialDelaySeconds: 20, + TimeoutSeconds: 20, + PeriodSeconds: 40, + }, + Env: []corev1.EnvVar{ + { + Name: "SVC_HOST", + Value: broker.Name, + }, + { + Name: "SVC_PORT", + Value: "1521", + }, + { + Name: "PRIMARY_DB_CONN_STR", + Value: currPrimaryDatabase.Name + ":1521/" + currPrimaryDatabase.Spec.Sid, + }, + { + Name: "DG_OBSERVER_ONLY", + Value: "true", + }, + { + Name: "DG_OBSERVER_NAME", + Value: broker.Name, + }, + { + // Sid used here only for Locking mechanism to work . + Name: "ORACLE_SID", + Value: "OBSRVR" + strings.ToUpper(currPrimaryDatabase.Spec.Sid), + }, + { + Name: "ORACLE_PWD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: currPrimaryDatabase.Spec.AdminPassword.SecretName, + }, + Key: currPrimaryDatabase.Spec.AdminPassword.SecretKey, + }, + }, + }, + }, + }). 
+ Build() + + // set the ownership and lifecyle of the observer pod to the dataguardbroker resource + ctrl.SetControllerReference(broker, &pod, r.Scheme) + + log.Info("Creating a new POD", "POD.Namespace", pod.Namespace, "POD.Name", pod.Name) + if err = r.Create(ctx, &pod); err != nil { + log.Error(err, "Failed to create new POD", "pod.Namespace", pod.Namespace, "POD.Name", pod.Name) + return err + } + + // Waiting for Pod to get created as sometimes it takes some time to create a Pod . 30 seconds TImeout + timeout := 30 + err = dbcommons.WaitForStatusChange(r, pod.Name, broker.Namespace, ctx, req, time.Duration(timeout)*time.Second, "pod", "creation") + if err != nil { + log.Error(err, "Error in Waiting for Pod status for Creation", "pod.Namespace", pod.Namespace, "POD.Name", pod.Name) + return err + } + log.Info("Succesfully Created New Pod ", "POD.NAME : ", pod.Name) + + eventReason := "SUCCESS" + eventMsg := "" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + + return nil +} + +// ############################################################################# +// +// Enable faststartfailover for the dataguard configuration +// +// ############################################################################# +func enableFSFOForDgConfig(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("enableFSFOForDgConfig", req.NamespacedName) + + // Get the current primary singleinstancedatabase resourcce + var sidb dbapi.SingleInstanceDatabase + namespacedName := types.NamespacedName{ + Namespace: broker.Namespace, + Name: broker.GetCurrentPrimaryDatabase(), + } + if err := r.Get(ctx, namespacedName, &sidb); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", sidb.Name)) + 
log.Info(fmt.Sprintf("singleinstancedatabase %s not found", namespacedName.Name)) + return ErrCurrentPrimaryDatabaseNotFound + } + return err + } + + // fetch singleinstancedatabase ready pod + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + // fetch singleinstancedatabase adminpassword secret + var adminPasswordSecret corev1.Secret + if err := r.Get(ctx, types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret); err != nil { + return err + } + var adminPassword string = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Enabling FastStartFailover", fmt.Sprintf("Enabling FastStartFailover for the dataguard broker %s", broker.Name)) + log.Info(fmt.Sprintf("Enabling FastStartFailover for the dataguard broker %s", broker.Name)) + + // enable faststartfailover for the dataguard configuration + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | dgmgrl sys/%s@%s ", dbcommons.EnableFSFOCMD, adminPassword, sidb.Status.Sid)) + if err != nil { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Enabling FastStartFailover failed", fmt.Sprintf("Enabling FastStartFailover for the dataguard broker %s failed", broker.Name)) + log.Error(err, err.Error()) + return err + } + log.Info("EnableFastStartFailover Output") + log.Info(out) + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Enabling FastStartFailover successful", fmt.Sprintf("Enabling FastStartFailover for the dataguard broker %s successful", broker.Name)) + + return nil +} + +// ############################################################################# +// +// Disable faststartfailover for the dataguard configuration +// +// 
############################################################################# +func disableFSFOForDGConfig(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) error { + + log := r.Log.WithValues("disableFSFOForDGConfig", req.NamespacedName) + + // Get the current primary singleinstancedatabase resource + var sidb dbapi.SingleInstanceDatabase + namespacedName := types.NamespacedName{ + Namespace: broker.Namespace, + Name: broker.GetCurrentPrimaryDatabase(), + } + if err := r.Get(ctx, namespacedName, &sidb); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", sidb.Name)) + log.Info(fmt.Sprintf("singleinstancedatabase %s not found", namespacedName.Name)) + return ErrCurrentPrimaryDatabaseNotFound + } + return err + } + + // fetch singleinstancedatabase ready pod + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + log.Error(err, err.Error()) + return err + } + + // fetch admin password for the singleinstancedatabase + var adminPasswordSecret corev1.Secret + if err := r.Get(ctx, types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret); err != nil { + return err + } + var adminPassword string = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Disabling FastStartFailover", fmt.Sprintf("Disabling FastStartFailover for the dataguard broker %s", broker.Name)) + log.Info(fmt.Sprintf("Disabling FastStartFailover for the dataguard broker %s", broker.Name)) + + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | 
dgmgrl sys/%s@%s ", fmt.Sprintf(dbcommons.DisableFSFOCMD, broker.Name), adminPassword, sidb.Status.Sid)) + if err != nil { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Disabling FastStartFailover failed", fmt.Sprintf("Disabling FastStartFailover for the dataguard broker %s failed", broker.Name)) + log.Error(err, err.Error()) + return err + } + log.Info("DisableFastStartFailover Output") + log.Info(out) + + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Disabling FastStartFailover", "faststartfailover disabled successfully") + log.Info("faststartfailover disabled successfully") + + return nil +} + +// ############################################################################# +// +// Get databases in dataguard configuration along with their roles +// +// ############################################################################# +func GetDatabasesInDataGuardConfigurationWithRole(r *DataguardBrokerReconciler, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) ([]string, error) { + r.Log.Info(fmt.Sprintf("GetDatabasesInDataGuardConfiguration are %v", broker.GetDatabasesInDataGuardConfiguration())) + for _, database := range broker.GetDatabasesInDataGuardConfiguration() { + + var singleInstanceDatabase dbapi.SingleInstanceDatabase + if err := r.Get(context.TODO(), types.NamespacedName{Namespace: broker.Namespace, Name: database}, &singleInstanceDatabase); err != nil { + // log about the error while fetching the database + continue + } + + // Fetch the primary database ready pod + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, singleInstanceDatabase.Spec.Image.Version, + singleInstanceDatabase.Spec.Image.PullFrom, singleInstanceDatabase.Name, singleInstanceDatabase.Namespace, ctx, req) + if err != nil || sidbReadyPod.Name == "" { + continue + } + + // try out + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("echo -e \"%s\" | sqlplus -s / 
as sysdba ", dbcommons.DataguardBrokerGetDatabaseCMD)) + if err != nil || strings.Contains(out, "no rows selected") && strings.Contains(out, "ORA-") { + continue + } + + r.Log.Info(fmt.Sprintf("sidbReadyPod is %v \n output of the exec is %v \n and output contains ORA- is %v", sidbReadyPod.Name, out, strings.Contains(out, "ORA-"))) + + out1 := strings.Replace(out, " ", "_", -1) + // filtering output and storing databses in dg configuration in "databases" slice + databases := strings.Fields(out1) + + // first 2 values in the slice will be column name(DATABASES) and a seperator(--------------) . so take the slice from position [2:] + databases = databases[2:] + return databases, nil + } + + return []string{}, errors.New("cannot get databases in dataguard configuration") +} diff --git a/controllers/dataguard/dataguardbroker_controller.go b/controllers/dataguard/dataguardbroker_controller.go new file mode 100644 index 00000000..4d7ae044 --- /dev/null +++ b/controllers/dataguard/dataguardbroker_controller.go @@ -0,0 +1,513 @@ +/* +** Copyright (c) 2024 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package controllers + +import ( + "context" + "errors" + "fmt" + "strconv" + "time" + + "github.com/go-logr/logr" + dbapi "github.com/oracle/oracle-database-operator/apis/database/v4" + dbcommons "github.com/oracle/oracle-database-operator/commons/database" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// DataguardBrokerReconciler reconciles a DataguardBroker object +type DataguardBrokerReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Config *rest.Config + Recorder record.EventRecorder +} + +const dataguardBrokerFinalizer = "database.oracle.com/dataguardbrokerfinalizer" + +//+kubebuilder:rbac:groups=database.oracle.com,resources=dataguardbrokers,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=database.oracle.com,resources=dataguardbrokers/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=database.oracle.com,resources=dataguardbrokers/finalizers,verbs=update +//+kubebuilder:rbac:groups="",resources=pods;pods/log;pods/exec;persistentvolumeclaims;services,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch + +func (r *DataguardBrokerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := r.Log.WithValues("reconciler", req.NamespacedName) + + log.Info("Reconcile requested") + + // Get the dataguardbroker resource if already exists + var dataguardBroker dbapi.DataguardBroker + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: req.Name}, &dataguardBroker); err != nil { + if apierrors.IsNotFound(err) { + 
log.Info("Resource deleted") + return ctrl.Result{Requeue: false}, nil + } + return ctrl.Result{Requeue: false}, err + } + + // Manage dataguardbroker deletion + if !dataguardBroker.DeletionTimestamp.IsZero() { + return r.manageDataguardBrokerDeletion(&dataguardBroker, ctx, req) + } + + // initialize the dataguardbroker resource status + if dataguardBroker.Status.Status == "" { + r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeNormal, "Status Initialization", "initializing status fields for the resource") + log.Info("Initializing status fields") + dataguardBroker.Status.Status = dbcommons.StatusCreating + dataguardBroker.Status.ExternalConnectString = dbcommons.ValueUnavailable + dataguardBroker.Status.ClusterConnectString = dbcommons.ValueUnavailable + dataguardBroker.Status.FastStartFailover = "false" + if len(dataguardBroker.Status.DatabasesInDataguardConfig) == 0 { + dataguardBroker.Status.DatabasesInDataguardConfig = map[string]string{} + } + } + + // Always refresh status before a reconcile + defer r.Status().Update(ctx, &dataguardBroker) + + // Mange DataguardBroker Creation + result, err := r.manageDataguardBrokerCreation(&dataguardBroker, ctx, req) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + if result.Requeue { + return result, nil + } + + // manage enabling and disabling faststartfailover + if dataguardBroker.Spec.FastStartFailover { + + for _, DbResource := range dataguardBroker.Status.DatabasesInDataguardConfig { + var singleInstanceDatabase dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: DbResource}, &singleInstanceDatabase); err != nil { + return ctrl.Result{Requeue: false}, err + } + r.Log.Info("Check the role for database", "database", singleInstanceDatabase.Name, "role", singleInstanceDatabase.Status.Role) + if singleInstanceDatabase.Status.Role == "SNAPSHOT_STANDBY" { + r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeWarning, "Enabling FSFO failed", "database %s 
is a snapshot database", singleInstanceDatabase.Name) + r.Log.Info("Enabling FSFO failed, one of the database is a snapshot database", "snapshot database", singleInstanceDatabase.Name) + return ctrl.Result{Requeue: true}, nil + } + } + + // set faststartfailover targets for all the singleinstancedatabases in the dataguard configuration + if err := setFSFOTargets(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // enable faststartfailover in the dataguard configuration + if err := enableFSFOForDgConfig(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // create Observer Pod + if err := createObserverPods(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // set faststartfailover status to true + dataguardBroker.Status.FastStartFailover = "true" + + } else { + + // disable faststartfailover + if err := disableFSFOForDGConfig(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // delete Observer Pod + observerReadyPod, _, _, _, err := dbcommons.FindPods(r, "", "", dataguardBroker.Name, dataguardBroker.Namespace, ctx, req) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + if observerReadyPod.Name != "" { + if err := r.Delete(ctx, &observerReadyPod); err != nil { + return ctrl.Result{Requeue: false}, err + } + } + + r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeNormal, "Observer Deleted", "database observer pod deleted") + log.Info("database observer deleted") + + // set faststartfailover status to false + dataguardBroker.Status.FastStartFailover = "false" + } + + // manage manual switchover + if dataguardBroker.Spec.SetAsPrimaryDatabase != "" && dataguardBroker.Spec.SetAsPrimaryDatabase != dataguardBroker.Status.PrimaryDatabase { + if _, ok := dataguardBroker.Status.DatabasesInDataguardConfig[dataguardBroker.Spec.SetAsPrimaryDatabase]; !ok { + 
r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeWarning, "Cannot Switchover", fmt.Sprintf("database with SID %v not found in dataguardbroker configuration", dataguardBroker.Spec.SetAsPrimaryDatabase)) + log.Info(fmt.Sprintf("cannot perform switchover, database with SID %v not found in dataguardbroker configuration", dataguardBroker.Spec.SetAsPrimaryDatabase)) + return ctrl.Result{Requeue: false}, nil + } + r.Recorder.Eventf(&dataguardBroker, corev1.EventTypeWarning, "Manual Switchover", fmt.Sprintf("Switching over to %s database", dataguardBroker.Status.DatabasesInDataguardConfig[dataguardBroker.Spec.SetAsPrimaryDatabase])) + log.Info(fmt.Sprintf("switching over to %s database", dataguardBroker.Status.DatabasesInDataguardConfig[dataguardBroker.Spec.SetAsPrimaryDatabase])) + result, err := r.manageManualSwitchOver(dataguardBroker.Spec.SetAsPrimaryDatabase, &dataguardBroker, ctx, req) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + if result.Requeue { + return result, nil + } + } + + // Update Status for broker and sidb resources + if err := updateReconcileStatus(r, &dataguardBroker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + dataguardBroker.Status.Status = dbcommons.StatusReady + log.Info("Reconcile Completed") + + if dataguardBroker.Spec.FastStartFailover { + return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil + } else { + return ctrl.Result{Requeue: false}, nil + } +} + +// ############################################################################################################################# +// +// Manage deletion and clean up of the dataguardBroker resource +// +// ############################################################################################################################# +func (r *DataguardBrokerReconciler) manageDataguardBrokerDeletion(broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := 
r.Log.WithValues("manageDataguardBrokerDeletion", req.NamespacedName) + + log.Info(fmt.Sprintf("Deleting dataguard broker %v", broker.Name)) + // Check if the DataguardBroker instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. + if controllerutil.ContainsFinalizer(broker, dataguardBrokerFinalizer) { + // Run finalization logic for dataguardBrokerFinalizer. If the + // finalization logic fails, don't remove the finalizer so + // that we can retry during the next reconciliation. + if err := cleanupDataguardBroker(r, broker, req, ctx); err != nil { + // handle the errors + return ctrl.Result{Requeue: false}, err + } + + // Remove dataguardBrokerFinalizer. Once all finalizers have been + // removed, the object will be deleted. + controllerutil.RemoveFinalizer(broker, dataguardBrokerFinalizer) + if err := r.Update(ctx, broker); err != nil { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Updating Resource", "Error while removing resource finalizers") + log.Info("Error while removing resource finalizers") + return ctrl.Result{Requeue: false}, err + } + } + return ctrl.Result{Requeue: false}, nil +} + +// ############################################################################################################################# +// +// Manage validation of singleinstancedatabases and creation of the dataguard configuration +// +// ############################################################################################################################# +func (r *DataguardBrokerReconciler) manageDataguardBrokerCreation(broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := r.Log.WithValues("manageDataguardBrokerCreation", req.NamespacedName) + + // Add finalizer for this dataguardbroker resource + if !controllerutil.ContainsFinalizer(broker, dataguardBrokerFinalizer) { + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Updating Resource", "Adding finalizers") + 
log.Info("Adding finalizer") + controllerutil.AddFinalizer(broker, dataguardBrokerFinalizer) + if err := r.Update(ctx, broker); err != nil { + return ctrl.Result{Requeue: false}, err + } + } + + // Check if a service for the dataguardbroker resources exists + var service corev1.Service + if err := r.Get(context.TODO(), types.NamespacedName{Name: broker.Name, Namespace: broker.Namespace}, &service); err != nil { + // check if the required service is not found then create the service + if apierrors.IsNotFound(err) { + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Creating Service", "creating service for the resource") + log.Info("creating service for the dataguardbroker resource") + + // instantiate the service specification + svc := dbcommons.NewRealServiceBuilder(). + SetName(broker.Name). + SetNamespace(broker.Namespace). + SetLabels(map[string]string{ + "app": broker.Name, + }). + SetAnnotation(func() map[string]string { + annotations := make(map[string]string) + if len(broker.Spec.ServiceAnnotations) != 0 { + for key, value := range broker.Spec.ServiceAnnotations { + annotations[key] = value + } + } + return annotations + }()). + SetPorts([]corev1.ServicePort{ + { + Name: "listener", + Port: 1521, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "xmldb", + Port: 5500, + Protocol: corev1.ProtocolTCP, + }, + }). + SetSelector(map[string]string{ + "app": broker.Name, + }). + SetType(func() corev1.ServiceType { + if broker.Spec.LoadBalancer { + return corev1.ServiceType("LoadBalancer") + } + return corev1.ServiceType("NodePort") + }()). 
+ Build() + + // Set the ownership of the service object to the dataguard broker resource object + ctrl.SetControllerReference(broker, &svc, r.Scheme) + + // create the service for dataguardbroker resource + if err = r.Create(ctx, &svc); err != nil { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Service Creation", "service creation failed") + log.Info("service creation failed") + return ctrl.Result{Requeue: false}, err + } else { + timeout := 30 + // Waiting for Service to get created as sometimes it takes some time to create a service. 30 seconds Timeout + err = dbcommons.WaitForStatusChange(r, svc.Name, broker.Namespace, ctx, req, time.Duration(timeout)*time.Second, "svc", "creation") + if err != nil { + log.Error(err, "Error in Waiting for svc status for Creation", "svc.Namespace", svc.Namespace, "SVC.Name", svc.Name) + return ctrl.Result{Requeue: false}, err + } + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Service Created", fmt.Sprintf("Succesfully Created New Service %v", svc.Name)) + log.Info("Succesfully Created New Service ", "Service.Name : ", svc.Name) + } + time.Sleep(10 * time.Second) + } else { + return ctrl.Result{Requeue: false}, err + } + } + + log.Info(" ", "Found Existing Service ", service.Name) + + // validate if all the databases have only one replica + for _, databaseRef := range broker.GetDatabasesInDataGuardConfiguration() { + var singleinstancedatabase dbapi.SingleInstanceDatabase + if err := r.Get(ctx, types.NamespacedName{Name: databaseRef, Namespace: broker.Namespace}, &singleinstancedatabase); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", singleinstancedatabase.Name)) + log.Info(fmt.Sprintf("singleinstancedatabase %s not found", databaseRef)) + return ctrl.Result{Requeue: false}, nil + } + return ctrl.Result{Requeue: false}, err + } + 
if broker.Spec.FastStartFailover && singleinstancedatabase.Status.Replicas > 1 { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SIDB Not supported", "dataguardbroker doesn't support multiple replicas sidb in FastStartFailover mode") + log.Info("dataguardbroker doesn't support multiple replicas sidb in FastStartFailover mode") + broker.Status.Status = dbcommons.StatusError + return ctrl.Result{Requeue: false}, nil + } + } + + // Get the current primary singleinstancedatabase resource + var sidb dbapi.SingleInstanceDatabase + namespacedName := types.NamespacedName{ + Namespace: broker.Namespace, + Name: broker.GetCurrentPrimaryDatabase(), + } + if err := r.Get(ctx, namespacedName, &sidb); err != nil { + if apierrors.IsNotFound(err) { + broker.Status.Status = dbcommons.StatusError + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "SingleInstanceDatabase Not Found", fmt.Sprintf("SingleInstanceDatabase %s not found", sidb.Name)) + log.Info(fmt.Sprintf("singleinstancedatabase %s not found", namespacedName.Name)) + return ctrl.Result{Requeue: false}, nil + } + return ctrl.Result{Requeue: false}, err + } + if sidb.Status.Role != "PRIMARY" { + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Spec Validation", fmt.Sprintf("singleInstanceDatabase %v not in primary role", sidb.Name)) + log.Info(fmt.Sprintf("singleinstancedatabase %s expected to be in primary role", sidb.Name)) + log.Info("updating database status to check for possible FSFO") + if err := updateReconcileStatus(r, broker, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + return ctrl.Result{Requeue: true, RequeueAfter: 60 * time.Second}, nil + } + + // validate current primary singleinstancedatabase readiness + log.Info(fmt.Sprintf("Validating readiness for singleinstancedatabase %v", sidb.Name)) + if err := validateSidbReadiness(r, broker, &sidb, ctx, req); err != nil { + if errors.Is(err, ErrCurrentPrimaryDatabaseNotReady) { + fastStartFailoverStatus, _ := 
strconv.ParseBool(broker.Status.FastStartFailover) + if broker.Status.Status != "" && fastStartFailoverStatus { + r.Recorder.Eventf(broker, corev1.EventTypeNormal, "Possible Failover", "Primary db not in ready state after setting up DG configuration") + } + if err := updateReconcileStatus(r, broker, ctx, req); err != nil { + log.Info("Error updating Dgbroker status") + } + r.Recorder.Eventf(broker, corev1.EventTypeWarning, "Waiting", err.Error()) + return ctrl.Result{Requeue: true, RequeueAfter: 60 * time.Second}, nil + } + return ctrl.Result{Requeue: false}, err + } + + // setup dataguard configuration + log.Info(fmt.Sprintf("setup Dataguard configuration for primary database %v", sidb.Name)) + if err := setupDataguardBrokerConfiguration(r, broker, &sidb, ctx, req); err != nil { + return ctrl.Result{Requeue: false}, err + } + + return ctrl.Result{Requeue: false}, nil +} + +// ############################################################################################################################# +// +// Manange manual switchover to the target database +// +// ############################################################################################################################# +func (r *DataguardBrokerReconciler) manageManualSwitchOver(targetSidbSid string, broker *dbapi.DataguardBroker, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + + log := r.Log.WithValues("SetAsPrimaryDatabase", req.NamespacedName) + + if _, ok := broker.Status.DatabasesInDataguardConfig[targetSidbSid]; !ok { + eventReason := "Cannot Switchover" + eventMsg := fmt.Sprintf("Database %s not a part of the dataguard configuration", targetSidbSid) + r.Recorder.Eventf(broker, corev1.EventTypeWarning, eventReason, eventMsg) + return ctrl.Result{Requeue: false}, nil + } + + // change broker status to updating to indicate manual switchover start + broker.Status.Status = dbcommons.StatusUpdating + r.Status().Update(ctx, broker) + + var sidb dbapi.SingleInstanceDatabase + if err 
:= r.Get(context.TODO(), types.NamespacedName{Name: broker.GetCurrentPrimaryDatabase(), Namespace: broker.Namespace}, &sidb); err != nil { + return ctrl.Result{Requeue: false}, err + } + + // Fetch the primary database ready pod to create chk file + sidbReadyPod, _, _, _, err := dbcommons.FindPods(r, sidb.Spec.Image.Version, + sidb.Spec.Image.PullFrom, sidb.Name, sidb.Namespace, ctx, req) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + + // Fetch the target database ready pod to create chk file + targetReadyPod, _, _, _, err := dbcommons.FindPods(r, "", "", broker.Status.DatabasesInDataguardConfig[targetSidbSid], req.Namespace, + ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: broker.Status.DatabasesInDataguardConfig[targetSidbSid], Namespace: req.Namespace}}) + if err != nil { + return ctrl.Result{Requeue: false}, err + } + + // Create a chk File so that no other pods take the lock during Switchover . + out, err := dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.CreateChkFileCMD) + if err != nil { + log.Error(err, err.Error()) + return ctrl.Result{Requeue: false}, err + } + log.Info("Successfully Created chk file " + out) + + out, err = dbcommons.ExecCommand(r, r.Config, targetReadyPod.Name, targetReadyPod.Namespace, "", ctx, req, false, "bash", "-c", dbcommons.CreateChkFileCMD) + if err != nil { + log.Error(err, err.Error()) + return ctrl.Result{Requeue: false}, err + } + log.Info("Successfully Created chk file " + out) + + eventReason := "Waiting" + eventMsg := "Switchover In Progress" + r.Recorder.Eventf(broker, corev1.EventTypeNormal, eventReason, eventMsg) + + // Get Admin password for current primary database + var adminPasswordSecret corev1.Secret + if err := r.Get(context.TODO(), types.NamespacedName{Name: sidb.Spec.AdminPassword.SecretName, Namespace: sidb.Namespace}, &adminPasswordSecret); err != nil { + return ctrl.Result{Requeue: false}, err + } + 
var adminPassword string = string(adminPasswordSecret.Data[sidb.Spec.AdminPassword.SecretKey]) + + // Connect to 'primarySid' db using dgmgrl and switchover to 'targetSidbSid' db to make 'targetSidbSid' db primary + _, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, true, "bash", "-c", + fmt.Sprintf(dbcommons.CreateAdminPasswordFile, adminPassword)) + if err != nil { + log.Error(err, err.Error()) + return ctrl.Result{Requeue: false}, err + } + log.Info("DB Admin pwd file created") + + out, err = dbcommons.ExecCommand(r, r.Config, sidbReadyPod.Name, sidbReadyPod.Namespace, "", ctx, req, false, "bash", "-c", + fmt.Sprintf("dgmgrl sys@%s \"SWITCHOVER TO %s\" < admin.pwd", broker.Status.PrimaryDatabase, targetSidbSid)) + if err != nil { + log.Error(err, err.Error()) + return ctrl.Result{Requeue: false}, err + } + log.Info("SWITCHOVER TO " + targetSidbSid + " Output") + log.Info(out) + + return ctrl.Result{Requeue: false}, nil +} + +// ############################################################################################################################# +// +// Setup the controller with the Manager +// +// ############################################################################################################################# +func (r *DataguardBrokerReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dbapi.DataguardBroker{}). + Owns(&corev1.Pod{}). //Watch for deleted pods of DataguardBroker Owner + WithEventFilter(dbcommons.ResourceEventHandler()). + WithOptions(controller.Options{MaxConcurrentReconciles: 100}). //ReconcileHandler is never invoked concurrently with the same object. 
+ Complete(r) +} diff --git a/controllers/observability/databaseobserver_controller.go b/controllers/observability/databaseobserver_controller.go index bd58e71e..e17ee0b3 100644 --- a/controllers/observability/databaseobserver_controller.go +++ b/controllers/observability/databaseobserver_controller.go @@ -44,6 +44,7 @@ import ( "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" apiError "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -55,7 +56,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "time" - apiv1 "github.com/oracle/oracle-database-operator/apis/observability/v1alpha1" + api "github.com/oracle/oracle-database-operator/apis/observability/v4" constants "github.com/oracle/oracle-database-operator/commons/observability" ) @@ -90,8 +91,8 @@ func (r *DatabaseObserverReconciler) Reconcile(ctx context.Context, req ctrl.Req r.Log.WithName(constants.LogReconcile).Info(constants.LogCRStart, "NamespacedName", req.NamespacedName) // fetch databaseObserver - api := &apiv1.DatabaseObserver{} - if e := r.Get(context.TODO(), req.NamespacedName, api); e != nil { + a := &api.DatabaseObserver{} + if e := r.Get(context.TODO(), req.NamespacedName, a); e != nil { // if CR is not found or does not exist then // consider either CR has been deleted @@ -101,7 +102,7 @@ func (r *DatabaseObserverReconciler) Reconcile(ctx context.Context, req ctrl.Req } r.Log.WithName(constants.LogReconcile).Error(e, constants.ErrorCRRetrieve) - r.Recorder.Event(api, corev1.EventTypeWarning, constants.EventReasonFailedCRRetrieval, constants.EventMessageFailedCRRetrieval) + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonFailedCRRetrieval, constants.EventMessageFailedCRRetrieval) return ctrl.Result{}, e } @@ -110,19 +111,19 @@ func (r *DatabaseObserverReconciler) Reconcile(ctx context.Context, req ctrl.Req defer 
r.validateCustomResourceReadiness(ctx, req) // initialize databaseObserver custom resource - if e := r.initialize(ctx, api, req); e != nil { + if e := r.initialize(ctx, a, req); e != nil { return ctrl.Result{}, e } // validate specs - if e := r.validateSpecs(api); e != nil { - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + if e := r.validateSpecs(a); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsExporterDeploymentReady, Status: metav1.ConditionFalse, Reason: constants.ReasonDeploymentSpecValidationFailed, Message: constants.MessageExporterDeploymentSpecValidationFailed, }) - if e := r.Status().Update(ctx, api); e != nil { + if e := r.Status().Update(ctx, a); e != nil { r.Log.WithName(constants.LogReconcile).Error(e, constants.ErrorStatusUpdate) } r.Log.WithName(constants.LogExportersDeploy).Error(e, constants.ErrorSpecValidationFailedDueToAnError) @@ -131,50 +132,74 @@ func (r *DatabaseObserverReconciler) Reconcile(ctx context.Context, req ctrl.Req // create resource if they do not exist exporterDeployment := &ObservabilityDeploymentResource{} - if res, e := r.createResourceIfNotExists(exporterDeployment, api, ctx, req); e != nil { + if res, e := r.createResourceIfNotExists(exporterDeployment, a, ctx, req); e != nil { return res, e } - if res, e := r.checkDeploymentForUpdates(exporterDeployment, api, ctx, req); e != nil { - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + // otherwise, check for updates on resource for any changes + if res, e := r.checkResourceForUpdates(exporterDeployment, a, ctx, req); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsExporterDeploymentReady, Status: metav1.ConditionFalse, - Reason: constants.ReasonDeploymentUpdateFailed, - Message: constants.MessageExporterDeploymentUpdateFailed, + Reason: constants.ReasonResourceUpdateFailed, + Message: constants.MessageExporterResourceUpdateFailed, }) return 
res, e } exporterService := &ObservabilityServiceResource{} - if res, e := r.createResourceIfNotExists(exporterService, api, ctx, req); e != nil { + if res, e := r.createResourceIfNotExists(exporterService, a, ctx, req); e != nil { + return res, e + } + + // otherwise, check for updates on resource for any changes + if res, e := r.checkResourceForUpdates(exporterService, a, ctx, req); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterServiceReady, + Status: metav1.ConditionFalse, + Reason: constants.ReasonResourceUpdateFailed, + Message: constants.MessageExporterResourceUpdateFailed, + }) return res, e } exporterServiceMonitor := &ObservabilityServiceMonitorResource{} - if res, e := r.createResourceIfNotExists(exporterServiceMonitor, api, ctx, req); e != nil { + if res, e := r.createResourceIfNotExists(exporterServiceMonitor, a, ctx, req); e != nil { + return res, e + } + + // otherwise, check for updates on resource for any changes + if res, e := r.checkResourceForUpdates(exporterServiceMonitor, a, ctx, req); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: constants.IsExporterServiceMonitorReady, + Status: metav1.ConditionFalse, + Reason: constants.ReasonResourceUpdateFailed, + Message: constants.MessageExporterResourceUpdateFailed, + }) return res, e } // check if deployment pods are ready - return r.validateDeploymentReadiness(api, ctx, req) + return r.validateDeploymentReadiness(a, ctx, req) } // initialize method sets the initial status to PENDING, exporterConfig and sets the base condition -func (r *DatabaseObserverReconciler) initialize(ctx context.Context, api *apiv1.DatabaseObserver, req ctrl.Request) error { +func (r *DatabaseObserverReconciler) initialize(ctx context.Context, a *api.DatabaseObserver, req ctrl.Request) error { - if api.Status.Conditions == nil || len(api.Status.Conditions) == 0 { + if a.Status.Conditions == nil || len(a.Status.Conditions) == 0 { // 
set condition - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsCRAvailable, Status: metav1.ConditionFalse, Reason: constants.ReasonInitStart, Message: constants.MessageCRInitializationStarted, }) - api.Status.Status = string(constants.StatusObservabilityPending) - api.Status.ExporterConfig = constants.UnknownValue - if e := r.Status().Update(ctx, api); e != nil { + a.Status.Status = string(constants.StatusObservabilityPending) + a.Status.ExporterConfig = constants.UnknownValue + a.Status.Version = constants.UnknownValue + if e := r.Status().Update(ctx, a); e != nil { r.Log.WithName(constants.LogReconcile).Error(e, constants.ErrorStatusUpdate) return e } @@ -185,45 +210,45 @@ func (r *DatabaseObserverReconciler) initialize(ctx context.Context, api *apiv1. } // validateSpecs method checks the values and secrets passed in the spec -func (r *DatabaseObserverReconciler) validateSpecs(api *apiv1.DatabaseObserver) error { +func (r *DatabaseObserverReconciler) validateSpecs(a *api.DatabaseObserver) error { // If either Vault Fields are empty, then assume a DBPassword secret is supplied. 
If the DBPassword secret not found, then error out - if api.Spec.Database.DBPassword.VaultOCID == "" || api.Spec.Database.DBPassword.VaultSecretName == "" { + if a.Spec.Database.DBPassword.VaultOCID == "" || a.Spec.Database.DBPassword.VaultSecretName == "" { dbSecret := &corev1.Secret{} - if e := r.Get(context.TODO(), types.NamespacedName{Name: api.Spec.Database.DBPassword.SecretName, Namespace: api.Namespace}, dbSecret); e != nil { - r.Recorder.Event(api, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBPasswordSecretMissing) + if e := r.Get(context.TODO(), types.NamespacedName{Name: a.Spec.Database.DBPassword.SecretName, Namespace: a.Namespace}, dbSecret); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBPasswordSecretMissing) return e } } // Does DB Connection String Secret Name actually exist dbConnectSecret := &corev1.Secret{} - if e := r.Get(context.TODO(), types.NamespacedName{Name: api.Spec.Database.DBConnectionString.SecretName, Namespace: api.Namespace}, dbConnectSecret); e != nil { - r.Recorder.Event(api, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBConnectionStringSecretMissing) + if e := r.Get(context.TODO(), types.NamespacedName{Name: a.Spec.Database.DBConnectionString.SecretName, Namespace: a.Namespace}, dbConnectSecret); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBConnectionStringSecretMissing) return e } // Does DB User String Secret Name actually exist dbUserSecret := &corev1.Secret{} - if e := r.Get(context.TODO(), types.NamespacedName{Name: api.Spec.Database.DBUser.SecretName, Namespace: api.Namespace}, dbUserSecret); e != nil { - r.Recorder.Event(api, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBPUserSecretMissing) + if e := r.Get(context.TODO(), 
types.NamespacedName{Name: a.Spec.Database.DBUser.SecretName, Namespace: a.Namespace}, dbUserSecret); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBPUserSecretMissing) return e } // Does a custom configuration configmap actually exist, if provided - if configurationCMName := api.Spec.Exporter.ExporterConfig.Configmap.Name; configurationCMName != "" { + if configurationCMName := a.Spec.ExporterConfig.Configmap.Name; configurationCMName != "" { configurationCM := &corev1.ConfigMap{} - if e := r.Get(context.TODO(), types.NamespacedName{Name: configurationCMName, Namespace: api.Namespace}, configurationCM); e != nil { - r.Recorder.Event(api, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorConfigmapMissing) + if e := r.Get(context.TODO(), types.NamespacedName{Name: configurationCMName, Namespace: a.Namespace}, configurationCM); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorConfigmapMissing) return e } } // Does DBWallet actually exist, if provided - if dbWalletSecretName := api.Spec.Database.DBWallet.SecretName; dbWalletSecretName != "" { + if dbWalletSecretName := a.Spec.Database.DBWallet.SecretName; dbWalletSecretName != "" { dbWalletSecret := &corev1.Secret{} - if e := r.Get(context.TODO(), types.NamespacedName{Name: dbWalletSecretName, Namespace: api.Namespace}, dbWalletSecret); e != nil { - r.Recorder.Event(api, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBWalletSecretMissing) + if e := r.Get(context.TODO(), types.NamespacedName{Name: dbWalletSecretName, Namespace: a.Namespace}, dbWalletSecret); e != nil { + r.Recorder.Event(a, corev1.EventTypeWarning, constants.EventReasonSpecError, constants.EventMessageSpecErrorDBWalletSecretMissing) return e } } @@ -232,17 +257,17 @@ func (r *DatabaseObserverReconciler) validateSpecs(api 
*apiv1.DatabaseObserver) } // createResourceIfNotExists method creates an ObserverResource if they have not yet been created -func (r *DatabaseObserverReconciler) createResourceIfNotExists(or ObserverResource, api *apiv1.DatabaseObserver, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *DatabaseObserverReconciler) createResourceIfNotExists(or ObserverResource, a *api.DatabaseObserver, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { conditionType, logger, groupVersionKind := or.identify() // update after - defer r.Status().Update(ctx, api) + defer r.Status().Update(ctx, a) - // generate desired object based on api.Spec - desiredObj, genErr := or.generate(api, r.Scheme) + // generate desired object based on a.Spec + desiredObj, genErr := or.generate(a, r.Scheme) if genErr != nil { - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: conditionType, Status: metav1.ConditionFalse, Reason: constants.ReasonGeneralResourceGenerationFailed, @@ -260,7 +285,7 @@ func (r *DatabaseObserverReconciler) createResourceIfNotExists(or ObserverResour if getErr != nil && apiError.IsNotFound(getErr) { if e := r.Create(context.TODO(), desiredObj); e != nil { // create - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: conditionType, Status: metav1.ConditionFalse, Reason: constants.ReasonGeneralResourceCreationFailed, @@ -271,7 +296,7 @@ func (r *DatabaseObserverReconciler) createResourceIfNotExists(or ObserverResour } // mark ready if created - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: conditionType, Status: metav1.ConditionTrue, Reason: constants.ReasonGeneralResourceCreated, @@ -280,7 +305,7 @@ func (r *DatabaseObserverReconciler) createResourceIfNotExists(or ObserverResour 
r.Log.WithName(logger).Info(constants.LogResourceCreated, "ResourceName", desiredObj.GetName(), "Kind", groupVersionKind, "Namespace", req.Namespace) } else if getErr != nil { // if an error occurred - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: conditionType, Status: metav1.ConditionFalse, Reason: constants.ReasonGeneralResourceValidationFailureDueToError, @@ -290,7 +315,7 @@ func (r *DatabaseObserverReconciler) createResourceIfNotExists(or ObserverResour return ctrl.Result{}, getErr } else if getErr == nil && conditionType != constants.IsExporterDeploymentReady { // exclude deployment - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: conditionType, Status: metav1.ConditionTrue, Reason: constants.ReasonGeneralResourceValidationCompleted, @@ -304,138 +329,68 @@ func (r *DatabaseObserverReconciler) createResourceIfNotExists(or ObserverResour return ctrl.Result{}, nil } -// checkDeploymentForUpdates method checks the deployment if it needs to be updated -func (r *DatabaseObserverReconciler) checkDeploymentForUpdates(or ObserverResource, api *apiv1.DatabaseObserver, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +// checkResourceForUpdates method checks the resource if it needs to be updated, updates if changes are found +func (r *DatabaseObserverReconciler) checkResourceForUpdates(or ObserverResource, a *api.DatabaseObserver, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - // declare - foundDeployment := &appsv1.Deployment{} + conditionType, logName, groupVersionKind := or.identify() - // generate object - desiredObj, genErr := or.generate(api, r.Scheme) + // generate desired object + dO, genErr := or.generate(a, r.Scheme) if genErr != nil { return ctrl.Result{}, genErr } - // convert - desiredDeployment := &appsv1.Deployment{} - if e := 
r.Scheme.Convert(desiredObj, desiredDeployment, nil); e != nil { - return ctrl.Result{}, e - } - - // retrieve latest deployment - if e := r.Get(context.TODO(), types.NamespacedName{Name: desiredObj.GetName(), Namespace: req.Namespace}, foundDeployment); e != nil { - return ctrl.Result{}, e - } - // check for containerImage - if constants.IsUpdateRequiredForContainerImage(desiredDeployment, foundDeployment) { - foundDeployment.Spec.Template.Spec.Containers[0].Image = constants.GetExporterImage(api) - - if e := r.updateDeployment(api, ctx, req, foundDeployment, constants.MessageExporterDeploymentImageUpdated, constants.EventMessageUpdatedImageSucceeded); e != nil { - return ctrl.Result{}, e - } - } - - // retrieve latest deployment - if e := r.Get(context.TODO(), types.NamespacedName{Name: desiredObj.GetName(), Namespace: req.Namespace}, foundDeployment); e != nil { + // convert dO -> d + d := &unstructured.Unstructured{} + d.SetGroupVersionKind(groupVersionKind) + if e := r.Scheme.Convert(dO, d, nil); e != nil { return ctrl.Result{}, e } - // check environment variables - if constants.IsUpdateRequiredForEnvironmentVars(desiredDeployment, foundDeployment) { - foundDeployment.Spec.Template.Spec.Containers[0].Env = constants.GetExporterEnvs(api) - - if e := r.updateDeployment(api, ctx, req, foundDeployment, constants.MessageExporterDeploymentEnvironmentUpdated, constants.EventMessageUpdatedEnvironmentSucceeded); e != nil { - return ctrl.Result{}, e - } - } - // retrieve latest deployment - foundDeployment = &appsv1.Deployment{} - if e := r.Get(context.TODO(), types.NamespacedName{Name: desiredObj.GetName(), Namespace: req.Namespace}, foundDeployment); e != nil { + // declare found + // retrieve latest into f + f := &unstructured.Unstructured{} + f.SetGroupVersionKind(groupVersionKind) + if e := r.Get(context.TODO(), types.NamespacedName{Name: dO.GetName(), Namespace: req.Namespace}, f); e != nil { return ctrl.Result{}, e } - // check config-volume, creds and ocikey - 
if constants.IsUpdateRequiredForVolumes(desiredDeployment, foundDeployment) { - foundDeployment.Spec.Template.Spec.Volumes = constants.GetExporterDeploymentVolumes(api) - foundDeployment.Spec.Template.Spec.Containers[0].VolumeMounts = constants.GetExporterDeploymentVolumeMounts(api) - if e := r.updateDeployment(api, ctx, req, foundDeployment, constants.MessageExporterDeploymentVolumesUpdated, constants.EventMessageUpdatedVolumesSucceeded); e != nil { - return ctrl.Result{}, e - } - } - - // update status for exporter config - var setConfigmapNameStatus string - for _, v := range desiredDeployment.Spec.Template.Spec.Volumes { - if v.Name == constants.DefaultConfigVolumeString { - setConfigmapNameStatus = v.ConfigMap.Name - api.Status.ExporterConfig = setConfigmapNameStatus - } - } - if api.Status.ExporterConfig != setConfigmapNameStatus { - api.Status.ExporterConfig = constants.DefaultValue - } - r.Status().Update(ctx, api) - - // retrieve latest deployment - foundDeployment = &appsv1.Deployment{} - if e := r.Get(context.TODO(), types.NamespacedName{Name: desiredObj.GetName(), Namespace: req.Namespace}, foundDeployment); e != nil { - return ctrl.Result{}, e - } - // check replicateCount - if constants.IsUpdateRequiredForReplicas(desiredDeployment, foundDeployment) { - desiredReplicaCount := constants.GetExporterReplicas(api) - foundDeployment.Spec.Replicas = &desiredReplicaCount + // check if something changed + if !equality.Semantic.DeepDerivative(d.Object, f.Object) { - if e := r.updateDeployment(api, ctx, req, foundDeployment, constants.MessageExporterDeploymentReplicaUpdated, constants.EventMessageUpdatedReplicaSucceeded); e != nil { + if e := r.Update(context.TODO(), d); e != nil { + r.Log.WithName(logName).Error(e, constants.LogErrorWithResourceUpdate, "ResourceName", f.GetName(), "Kind", groupVersionKind.Kind, "Namespace", req.Namespace) return ctrl.Result{}, e } - } - - return ctrl.Result{}, nil -} - -// updateDeployment method updates the deployment and 
sets the condition -func (r *DatabaseObserverReconciler) updateDeployment(api *apiv1.DatabaseObserver, ctx context.Context, req ctrl.Request, d *appsv1.Deployment, updateMessage string, recorderMessage string) error { - // make update - defer r.Status().Update(ctx, api) - - if e := r.Update(context.TODO(), d); e != nil { - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ - Type: constants.IsExporterDeploymentReady, + // update completed, however the pods needs to be validated for readiness + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ + Type: conditionType, Status: metav1.ConditionFalse, - Reason: constants.ReasonDeploymentUpdateFailed, - Message: constants.MessageExporterDeploymentUpdateFailed, + Reason: constants.ReasonResourceUpdated, + Message: constants.MessageExporterResourceUpdated, }) - r.Log.WithName(constants.LogExportersDeploy).Error(e, constants.ErrorDeploymentUpdate, "ResourceName", d.GetName(), "Kind", "Deployment", "Namespace", req.Namespace) - return e + r.Log.WithName(logName).Info(constants.LogSuccessWithResourceUpdate, "ResourceName", f.GetName(), "Kind", groupVersionKind.Kind, "Namespace", req.Namespace) + r.Recorder.Event(a, corev1.EventTypeNormal, constants.EventReasonUpdateSucceeded, groupVersionKind.Kind+" is updated.") + r.Status().Update(ctx, a) } - // update completed, however the pods needs to be validated for readiness - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ - Type: constants.IsExporterDeploymentReady, - Status: metav1.ConditionFalse, - Reason: constants.ReasonDeploymentUpdated, - Message: updateMessage, - }) - r.Log.WithName(constants.LogExportersDeploy).Info(constants.LogResourceUpdated, "ResourceName", d.GetName(), "Kind", "Deployment", "Namespace", req.Namespace) - r.Recorder.Event(api, corev1.EventTypeNormal, constants.EventReasonUpdateSucceeded, recorderMessage) + return ctrl.Result{}, nil - return nil } // validateDeploymentReadiness method evaluates deployment 
readiness by checking the status of all deployment pods -func (r *DatabaseObserverReconciler) validateDeploymentReadiness(api *apiv1.DatabaseObserver, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *DatabaseObserverReconciler) validateDeploymentReadiness(a *api.DatabaseObserver, ctx context.Context, req ctrl.Request) (ctrl.Result, error) { d := &appsv1.Deployment{} - rName := constants.DefaultExporterDeploymentPrefix + api.Name + rName := a.Name // update after - defer r.Status().Update(ctx, api) + defer r.Status().Update(ctx, a) // get latest deployment - if e := r.Get(context.TODO(), types.NamespacedName{Name: rName, Namespace: api.Namespace}, d); e != nil { - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + if e := r.Get(context.TODO(), types.NamespacedName{Name: rName, Namespace: a.Namespace}, d); e != nil { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsExporterDeploymentReady, Status: metav1.ConditionFalse, Reason: constants.ReasonGeneralResourceValidationFailureDueToError, @@ -445,16 +400,14 @@ func (r *DatabaseObserverReconciler) validateDeploymentReadiness(api *apiv1.Data } // get deployment labels - labels := d.Spec.Template.Labels - cLabels := client.MatchingLabels{} - for k, v := range labels { - cLabels[k] = v + cLabels := client.MatchingLabels{ + "app": a.Name, } // list pods pods := &corev1.PodList{} if e := r.List(context.TODO(), pods, []client.ListOption{cLabels}...); e != nil { - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsExporterDeploymentReady, Status: metav1.ConditionFalse, Reason: constants.ReasonDeploymentFailed, @@ -466,7 +419,7 @@ func (r *DatabaseObserverReconciler) validateDeploymentReadiness(api *apiv1.Data // check each pod phase for _, pod := range pods.Items { if pod.Status.Phase == corev1.PodFailed { - meta.SetStatusCondition(&api.Status.Conditions, 
metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsExporterDeploymentReady, Status: metav1.ConditionFalse, Reason: constants.ReasonDeploymentFailed, @@ -475,7 +428,7 @@ func (r *DatabaseObserverReconciler) validateDeploymentReadiness(api *apiv1.Data return ctrl.Result{}, errors.New(constants.ErrorDeploymentPodsFailure) } else if pod.Status.Phase != corev1.PodRunning { // pod could be creating, - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsExporterDeploymentReady, Status: metav1.ConditionUnknown, Reason: constants.ReasonDeploymentPending, @@ -486,12 +439,14 @@ func (r *DatabaseObserverReconciler) validateDeploymentReadiness(api *apiv1.Data } // once all pods are found to be running, mark deployment as ready and the exporter as ready - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsExporterDeploymentReady, Status: metav1.ConditionTrue, Reason: constants.ReasonDeploymentSuccessful, Message: constants.MessageExporterDeploymentSuccessful, }) + a.Status.Version = constants.GetExporterVersion(a) + a.Status.ExporterConfig = constants.GetExporterConfig(a) return ctrl.Result{}, nil } @@ -499,48 +454,48 @@ func (r *DatabaseObserverReconciler) validateDeploymentReadiness(api *apiv1.Data func (r *DatabaseObserverReconciler) validateCustomResourceReadiness(ctx context.Context, req ctrl.Request) { // get latest object - api := &apiv1.DatabaseObserver{} - if e := r.Get(context.TODO(), req.NamespacedName, api); e != nil { + a := &api.DatabaseObserver{} + if e := r.Get(context.TODO(), req.NamespacedName, a); e != nil { r.Log.WithName(constants.LogReconcile).Error(e, constants.ErrorCRRetrieve) return } // make update - defer r.Status().Update(ctx, api) + defer r.Status().Update(ctx, a) - if 
meta.IsStatusConditionPresentAndEqual(api.Status.Conditions, constants.IsExporterDeploymentReady, metav1.ConditionUnknown) { - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + if meta.IsStatusConditionPresentAndEqual(a.Status.Conditions, constants.IsExporterDeploymentReady, metav1.ConditionUnknown) { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsCRAvailable, Status: metav1.ConditionFalse, Reason: constants.ReasonValidationInProgress, Message: constants.MessageCRValidationWaiting, }) - api.Status.Status = string(constants.StatusObservabilityPending) - } else if meta.IsStatusConditionFalse(api.Status.Conditions, constants.IsExporterDeploymentReady) || - meta.IsStatusConditionFalse(api.Status.Conditions, constants.IsExporterServiceReady) || - meta.IsStatusConditionFalse(api.Status.Conditions, constants.IsExporterServiceMonitorReady) { - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + a.Status.Status = string(constants.StatusObservabilityPending) + } else if meta.IsStatusConditionFalse(a.Status.Conditions, constants.IsExporterDeploymentReady) || + meta.IsStatusConditionFalse(a.Status.Conditions, constants.IsExporterServiceReady) || + meta.IsStatusConditionFalse(a.Status.Conditions, constants.IsExporterServiceMonitorReady) { + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsCRAvailable, Status: metav1.ConditionFalse, Reason: constants.ReasonReadyFailed, Message: constants.MessageCRValidationFailed, }) - api.Status.Status = string(constants.StatusObservabilityError) + a.Status.Status = string(constants.StatusObservabilityError) } else { - meta.SetStatusCondition(&api.Status.Conditions, metav1.Condition{ + meta.SetStatusCondition(&a.Status.Conditions, metav1.Condition{ Type: constants.IsCRAvailable, Status: metav1.ConditionTrue, Reason: constants.ReasonReadyValidated, Message: constants.MessageCRValidated, }) - api.Status.Status = 
string(constants.StatusObservabilityReady) + a.Status.Status = string(constants.StatusObservabilityReady) } } // SetupWithManager sets up the controller with the Manager. func (r *DatabaseObserverReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&apiv1.DatabaseObserver{}). + For(&api.DatabaseObserver{}). Owns(&appsv1.Deployment{}). Owns(&corev1.Service{}). Complete(r) diff --git a/controllers/observability/databaseobserver_resource.go b/controllers/observability/databaseobserver_resource.go index 8c20ebe5..6be6f693 100644 --- a/controllers/observability/databaseobserver_resource.go +++ b/controllers/observability/databaseobserver_resource.go @@ -1,7 +1,7 @@ package controllers import ( - apiv1 "github.com/oracle/oracle-database-operator/apis/observability/v1alpha1" + api "github.com/oracle/oracle-database-operator/apis/observability/v4" constants "github.com/oracle/oracle-database-operator/commons/observability" monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" @@ -10,7 +10,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) @@ -25,55 +24,78 @@ type ObservabilityServiceResource struct{} type ObservabilityServiceMonitorResource struct{} type ObserverResource interface { - generate(*apiv1.DatabaseObserver, *runtime.Scheme) (*unstructured.Unstructured, error) + generate(*api.DatabaseObserver, *runtime.Scheme) (*unstructured.Unstructured, error) identify() (string, string, schema.GroupVersionKind) } -func (resource *ObservabilityDeploymentResource) generate(api *apiv1.DatabaseObserver, scheme *runtime.Scheme) (*unstructured.Unstructured, error) { - rName := constants.DefaultExporterDeploymentPrefix + api.Name +func (resource *ObservabilityDeploymentResource) 
generate(a *api.DatabaseObserver, scheme *runtime.Scheme) (*unstructured.Unstructured, error) { + rName := a.Name rContainerName := constants.DefaultExporterContainerName - rContainerImage := constants.GetExporterImage(api) - rVolumes := constants.GetExporterDeploymentVolumes(api) - rVolumeMounts := constants.GetExporterDeploymentVolumeMounts(api) - rSelectors := constants.GetExporterSelector(api) - rReplicas := constants.GetExporterReplicas(api) - rEnvs := constants.GetExporterEnvs(api) + rContainerImage := constants.GetExporterImage(a) + rArgs := constants.GetExporterArgs(a) + rCommands := constants.GetExporterCommands(a) + rVolumes := constants.GetExporterDeploymentVolumes(a) + rVolumeMounts := constants.GetExporterDeploymentVolumeMounts(a) + + rReplicas := constants.GetExporterReplicas(a) + rEnvs := constants.GetExporterEnvs(a) + + rLabels := constants.GetLabels(a, a.Spec.Exporter.Deployment.Labels) + rPodLabels := constants.GetLabels(a, a.Spec.Exporter.Deployment.DeploymentPodTemplate.Labels) + rSelector := constants.GetSelectorLabel(a) + + rDeploymentSecurityContext := constants.GetExporterDeploymentSecurityContext(a) + rPodSecurityContext := constants.GetExporterPodSecurityContext(a) rPort := []corev1.ContainerPort{ - {ContainerPort: 8080}, + {ContainerPort: constants.DefaultAppPort}, + } + + // exporterContainer + rContainers := make([]corev1.Container, 1) + rContainers[0] = corev1.Container{ + Image: rContainerImage, + ImagePullPolicy: corev1.PullAlways, + Name: rContainerName, + Env: rEnvs, + VolumeMounts: rVolumeMounts, + Ports: rPort, + Args: rArgs, + Command: rCommands, + SecurityContext: rDeploymentSecurityContext, } + constants.AddSidecarContainers(a, &rContainers) + constants.AddSidecarVolumes(a, &rVolumes) + + // additionalContainers + obj := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: rName, - Namespace: api.Namespace, + Namespace: a.Namespace, + Labels: rLabels, }, Spec: appsv1.DeploymentSpec{ Replicas: &rReplicas, Selector: 
&metav1.LabelSelector{ - MatchLabels: rSelectors, + MatchLabels: rSelector, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: rSelectors, + Labels: rPodLabels, }, Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Image: rContainerImage, - ImagePullPolicy: corev1.PullAlways, - Name: rContainerName, - Env: rEnvs, - VolumeMounts: rVolumeMounts, - Ports: rPort, - }}, - RestartPolicy: corev1.RestartPolicyAlways, - Volumes: rVolumes, + Containers: rContainers, + RestartPolicy: corev1.RestartPolicyAlways, + Volumes: rVolumes, + SecurityContext: rPodSecurityContext, }, }, }, } - if err := controllerutil.SetControllerReference(api, obj, scheme); err != nil { + if err := controllerutil.SetControllerReference(a, obj, scheme); err != nil { return nil, err } @@ -84,32 +106,26 @@ func (resource *ObservabilityDeploymentResource) generate(api *apiv1.DatabaseObs return u, nil } -func (resource *ObservabilityServiceResource) generate(api *apiv1.DatabaseObserver, scheme *runtime.Scheme) (*unstructured.Unstructured, error) { - rServiceName := "obs-svc-" + api.Name - rLabels := constants.GetExporterLabels(api) - rPort := constants.GetExporterServicePort(api) - rSelector := constants.GetExporterSelector(api) +func (resource *ObservabilityServiceResource) generate(a *api.DatabaseObserver, scheme *runtime.Scheme) (*unstructured.Unstructured, error) { + rServiceName := a.Name + rLabels := constants.GetLabels(a, a.Spec.Exporter.Service.Labels) + rSelector := constants.GetSelectorLabel(a) + rPorts := constants.GetExporterServicePort(a) obj := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: rServiceName, Labels: rLabels, - Namespace: api.Namespace, + Namespace: a.Namespace, }, Spec: corev1.ServiceSpec{ - Type: "ClusterIP", + Type: constants.DefaultServiceType, Selector: rSelector, - Ports: []corev1.ServicePort{ - { - Name: "metrics", - Port: rPort, - TargetPort: intstr.FromInt32(constants.DefaultServiceTargetPort), - }, - }, + Ports: rPorts, }, } - 
if err := controllerutil.SetControllerReference(api, obj, scheme); err != nil { + if err := controllerutil.SetControllerReference(a, obj, scheme); err != nil { return nil, err } @@ -120,32 +136,32 @@ func (resource *ObservabilityServiceResource) generate(api *apiv1.DatabaseObserv return u, nil } -func (resource *ObservabilityServiceMonitorResource) generate(api *apiv1.DatabaseObserver, scheme *runtime.Scheme) (*unstructured.Unstructured, error) { - rName := constants.DefaultServiceMonitorPrefix + api.Name - rLabels := constants.GetExporterLabels(api) - rSelector := constants.GetExporterSelector(api) - rPort := constants.GetExporterServiceMonitorPort(api) - rInterval := "20s" +func (resource *ObservabilityServiceMonitorResource) generate(a *api.DatabaseObserver, scheme *runtime.Scheme) (*unstructured.Unstructured, error) { + rName := a.Name + rEndpoints := constants.GetEndpoints(a) + + rSelector := constants.GetSelectorLabel(a) + rLabels := constants.GetLabels(a, a.Spec.Prometheus.ServiceMonitor.Labels) + + smSpec := monitorv1.ServiceMonitorSpec{ + Endpoints: rEndpoints, + Selector: metav1.LabelSelector{ + MatchLabels: rSelector, + }, + } + constants.AddNamespaceSelector(a, &smSpec) obj := &monitorv1.ServiceMonitor{ ObjectMeta: metav1.ObjectMeta{ Name: rName, Labels: rLabels, - Namespace: api.Namespace, - }, - Spec: monitorv1.ServiceMonitorSpec{ - Endpoints: []monitorv1.Endpoint{{ - Interval: monitorv1.Duration(rInterval), - Port: rPort, - }}, - Selector: metav1.LabelSelector{ - MatchLabels: rSelector, - }, + Namespace: a.Namespace, }, + Spec: smSpec, } // set reference - if e := controllerutil.SetControllerReference(api, obj, scheme); e != nil { + if e := controllerutil.SetControllerReference(a, obj, scheme); e != nil { return nil, e } diff --git a/docs/adb/ADB_LONG_TERM_BACKUP.md b/docs/adb/ADB_LONG_TERM_BACKUP.md index 4720697d..312dac0d 100644 --- a/docs/adb/ADB_LONG_TERM_BACKUP.md +++ b/docs/adb/ADB_LONG_TERM_BACKUP.md @@ -2,13 +2,13 @@ To create long-term 
backups of Autonomous Databases, use this procedure. -Oracle Cloud Infrastructure (OCI) automatically backs up your Autonomous Databases, and retains these backups for 60 days. You can restore and recover your database to any point-in-time in this retention period. Automatic backups are full backups taken every 60 days, with daily incremental backups. You can also create long-term backups for your database with a retention period between 3 months and up to 10 years. For more information, please visit [Create Long-Term Backups on Autonomous Database](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/backup-long-term.html) and [Backup and Restore Notes](https://docs.oracle.com/en-us/iaas/autonomous-database-serverless/doc/backup-restore-notes.html). +Oracle Cloud Infrastructure (OCI) automatically backs up your Autonomous Databases, and retains these backups for 60 days. You can restore and recover your database to any point-in-time in this retention period. Automatic backups are full backups taken every 60 days, with daily incremental backups. You can also create long-term backups for your database with a retention period ranging from 3 months to 10 years. For more information, see: [Create Long-Term Backups on Autonomous Database](https://docs.oracle.com/en/cloud/paas/autonomous-database/serverless/adbsb/backup-long-term.html) and [Backup and Restore Notes](https://docs.oracle.com/en-us/iaas/autonomous-database-serverless/doc/backup-restore-notes.html). ## Create Long-Term Backup To back up an Autonomous Database, complete this procedure. -1. Add the following fields to the AutonomousDatabaseBackup resource definition. An example `.yaml` file is available here: [`config/samples/adb/autonomousdatabase_backup.yaml`](./../../config/samples/adb/autonomousdatabase_backup.yaml) +1. Add the following fields to the `AutonomousDatabaseBackup` resource definition. 
An example `.yaml` file is available here: [`config/samples/adb/autonomousdatabase_backup.yaml`](./../../config/samples/adb/autonomousdatabase_backup.yaml) | Attribute | Type | Description | Required? | |----|----|----|----| | `spec.displayName` | string | The user-friendly name for the backup. This name does not have to be unique. | Yes | diff --git a/docs/adb/ADB_PREREQUISITES.md b/docs/adb/ADB_PREREQUISITES.md index f8c04c4b..a730f4fe 100644 --- a/docs/adb/ADB_PREREQUISITES.md +++ b/docs/adb/ADB_PREREQUISITES.md @@ -48,7 +48,7 @@ After creating the ConfigMap and the Secret, use their names as the values of `o Instance principal authorization enables the operator to make API calls from an instance (that is, a node) without requiring the `ociConfigMap`, and `ociSecret` attributes in the `.yaml` file. This approach applies only to instances that are running in the Oracle Cloud Infrastructure (OCI). In addition, this approach grants permissions to the nodes that match the rules, which means that all the pods in the nodes can make the service calls. -To set up the instance principals, you will have to: +To set up the instance principals, complete the following tasks: * [Define dynamic group that includes the nodes in which the operator runs](#define-dynamic-group) * [Define policies that grant to the dynamic group the required permissions for the operator to its OCI interactions](#define-policies) @@ -147,4 +147,4 @@ kubectl create configmap oci-cred \ Allow any-user to manage all-resources in compartment where all {request.principal.namespace='oracle-database-operator-system',request.principal.type='workload',request.principal.cluster_id='',request.principal.service_account='default'} ``` -After creating the policy, operator pods will be granted sufficient permissions to call OCI services. You can now proceed to the installation. \ No newline at end of file +After creating the policy, operator pods will be granted sufficient permissions to call OCI services. 
You can now proceed to the installation. diff --git a/docs/adb/NETWORK_ACCESS_OPTIONS.md b/docs/adb/NETWORK_ACCESS_OPTIONS.md index e029b52d..e7eb0a56 100644 --- a/docs/adb/NETWORK_ACCESS_OPTIONS.md +++ b/docs/adb/NETWORK_ACCESS_OPTIONS.md @@ -7,77 +7,58 @@ Network access for Autonomous Database includes public access, and configuring s For more information about these options, see: [Configuring Network Access with Access Control Rules (ACLs) and Private Endpoints ](https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/autonomous-network-access.html#GUID-D2D468C3-CA2D-411E-92BC-E122F795A413). ## Supported Features -Review the network access configuration options available to you with Autonomous Database. +Review the following options available to you with Autonomous Database. -### Types of Network Access +* [Configuring Network Access with Allowing Secure Access from Anywhere](#configuring-network-access-with-allowing-secure-access-from-anywhere) on shared Exadata infrastructure +* [Configuring Network Access with Access Control Rules (ACLs)](#configuring-network-access-with-access-control-rules-acls) on shared Exadata infrastructure +* [Configure Network Access with Private Endpoint Access Only](#configure-network-access-with-private-endpoint-access-only) on shared Exadata infrastructure +* [Allowing TLS or Require Only Mutual TLS (mTLS) Authentication](#allowing-tls-or-require-only-mutual-tls-mtls-authentication) on shared Exadata infrastructure +* [Autonomous Database with access control list enabled](#autonomous-database-with-access-control-list-enabled-on-dedicated-exadata-infrastructure) on dedicated Exadata infrastructure -There are three types of network access supported by Autonomous Database: +## Configuring Network Access with Allowing Secure Access from Anywhere -* **PUBLIC** +Before changing the Network Access to Allowing Secure Access from Anywhere, ensure that your network security protocol requries only mTLS (Mutual TLS) authentication. 
For more details, see: [Allow both TLS and mutual TLS (mTLS) authentication](#allow-both-tls-and-mutual-tls-mtls-authentication). If mTLS enforcement is already enabled on your Autonomous Database, then you can skip this step. - The Public option permits secure access from anywhere. The network access type is PUBLIC if no option is specified in the specification. With this option, mutual TLS (mTLS) authentication is always required to connect to the database. This option is available only for databases on shared Exadata infrastructure. +To specify that Autonomous Database can be connected from any location with a valid credential, complete one of the following procedures, based on your network access configuration. -* **RESTRICTED** - - The Restricted option permits connections to the database only as specified by the access control lists (ACLs) that you create. This option is available only for databases on shared Exadata infrastructure. - - You can add the following to your ACL: - * **IP Address**: Specify one or more individual public IP addresses. Use commas to delimit your addresses in the input field. - * **CIDR Block**: Specify one or more ranges of public IP addresses using CIDR notation. Use commas to separate your CIDR block entries in the input field. - * **Virtual Cloud Network (OCID)** (applies to Autonomous Databases on shared Exadata infrastructure): Specify the Oracle Cloud Identifier (OCID) of a virtual cloud network (VCN). If you want to specify multiple IP addresses or CIDR ranges within the same VCN, then do not create multiple access control list entries. Instead, use one access control list entry with the values for the multiple IP addresses or CIDR ranges, separated by commas. - -* **PRIVATE** - - The Private option creates a private endpoint for your database within a specified VCN. This option is available for databases on shared Exadata infrastructure, and is the only available option for databases on dedicated Exadata infrastructure. 
Review the private options for your configuration: - - * **Autonomous Databases on shared Exadata infrastructure**: - - This option permits access through private enpoints by specifying the OCIDs of a subnet and the network security groups (NSGs) under the same VCN in the specification. - - * **Autonomous Databases on dedicated Exadata infrastructure**: - - The network path to a dedicated Autonomous Database is through a VCN and subnet defined by the dedicated infrastucture hosting the database. Usually, the subnet is defined as private, which means that there is no public Internet access to the databases. - - Autonomous Database supports restricted access using an ACL. You have the option to enable an ACL by setting the `isAccessControlEnabled` parameter. If access is disabled, then database access is defined by the network security rules. If enabled, then database access is restricted to the IP addresses and CIDR blocks defined in the ACL. Note that enabling an ACL with an empty list of IP addresses makes the database inaccessible. See [Autonomous Database with Private Endpoint](https://docs.oracle.com/en-us/iaas/Content/Database/Concepts/adbsprivateaccess.htm) for overview and examples for private endpoint. - -### Allowing TLS or Require Only Mutual TLS (mTLS) Authentication - -If your Autonomous Database instance is configured to allow only mTLS connections, then you can reconfigure the instance to permit both mTLS and TLS connections. When you reconfigure the instance to permit both mTLS and TLS, you can use both authentication types at the same time, so that connections are no longer restricted to require mTLS authentication. - -This option only applies to Autonomous Databases on shared Exadata infrastructure. You can permit TLS connections when network access type is configured by using one of the following options: - -* **RESTRICTED**: with ACLs defined. -* **PRIVATE**: with a private endpoint defined. 
- -## Example YAML - -You can always configure the network access options when you create an Autonomous Database, or update the settings after you create the database. Following are some example YAMLs that show how to configure the networking with different network access options. - -For Autonomous Databases on shared Exadata infrastructure, review the following examples: - -* Configure network access [with PUBLIC access type](#autonomous-database-with-public-access-type-on-shared-exadata-infrastructure) -* Configure network access [with RESTRICTED access type](#autonomous-database-with-restricted-access-type-on-shared-exadata-infrastructure) -* Configure network access [with PRIVATE access type](#autonomous-database-with-private-access-type-on-shared-exadata-infrastructure) -* [Change the mutual TLS (mTLS) authentication setting](#allow-both-tls-and-mutual-tls-mtls-authentication-of-autonomous-database-on-shared-exadata-infrastructure) +### Option 1 - Change the Network Access from "Secure Access from Allowed IPs and VCNs Only" to "Allowing Secure Access from Anywhere" +1. Add the following parameters to the specification. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): -For Autonomous Databases on dedicated Exadata infrastructure, refiew the following examples: + | Attribute | Type | Description | + |----|----|----| + | `whitelistedIps` | []string | The client IP access control list (ACL). This feature is available for Autonomous Databases on [shared Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL can access the Autonomous Database instance.

For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a deliminator between the VCN-specific subnets or IPs.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`

For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`

For an update operation, if you want to delete all the IPs in the ACL, then use an array with a single empty string entry. | -* Configure network access [with access control list enabled](#autonomous-database-with-access-control-list-enabled-on-dedicated-exadata-infrastructure) + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + autonomousDatabaseOCID: ocid1.autonomousdatabase... + whitelistedIps: + - + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` -> Note: -> -> * Operations on Exadata infrastructure require an `AutonomousDatabase` object to be in your cluster. These examples assume either the provision operation or the bind operation has been done before you begin, and the operator is authorized with API Key Authentication. -> * If you are creating an Autonomous Database, then see step 4 of [Provision an Autonomous Database](./README.md#provision-an-autonomous-database) in [Managing Oracle Autonomous Databases with Oracle Database Operator for Kubernetes](./README.md) topic to return to provisioning instructions. +2. Apply the yaml: -### Autonomous Database with PUBLIC access type on shared Exadata infrastructure + ```sh + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` -To configure the network with PUBLIC access type, complete this procedure. +### Option 2 - Change the Network Access from "Private Endpoint Access Only" to "Allowing Secure Access from Anywhere" 1. Add the following parameters to the specification. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): - | Attribute | Type | Description | Required? 
| - |----|----|----|----| - | `networkAccess.accessType` | string | An enumeration (enum) value that defines how the database can be accessed. The value can be PUBLIC, RESTRICTED or PRIVATE. See [Types of Network Access](#types-of-network-access) for more descriptions. | Yes | + | Attribute | Type | Description | + |----|----|----| + | `privateEndpointLabel` | string | The hostname prefix for the resource. | ```yaml --- @@ -86,11 +67,10 @@ To configure the network with PUBLIC access type, complete this procedure. metadata: name: autonomousdatabase-sample spec: + action: Update details: autonomousDatabaseOCID: ocid1.autonomousdatabase... - networkAccess: - # Allow secure access from everywhere. - accessType: PUBLIC + privateEndpointLabel: "" ociConfig: configMapName: oci-cred secretName: oci-privatekey @@ -99,21 +79,20 @@ To configure the network with PUBLIC access type, complete this procedure. 2. Apply the yaml: ```sh - kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured ``` -### Autonomous Database with RESTRICTED access type on shared Exadata infrastructure +## Configuring Network Access with Access Control Rules (ACLs) -To configure the network with RESTRICTED access type, complete this procedure. +To configure Network Access with ACLs, complete this procedure. 1. Add the following parameters to the specification. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): - | Attribute | Type | Description | Required? | - |----|----|----|----| - | `networkAccess.accessType` | string | An enumerated (enum) that defines how the database can be accessed. The value can be PUBLIC, RESTRICTED or PRIVATE. 
See [Types of Network Access](#types-of-network-access) for more descriptions. | Yes | - | `networkAccess.accessControlList` | []string | The client IP access control list (ACL). This feature is available for Autonomous Databases on [shared Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL can access the Autonomous Database instance.

For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a delimiter between the VCN-specific subnets or IPs.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`

For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`

For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry. | Yes | + | Attribute | Type | Description | + |----|----|----| + | `whitelistedIps` | []string | The client IP access control list (ACL). This feature is available for Autonomous Databases on [shared Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL can access the Autonomous Database instance.

For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a delimiter between the VCN-specific subnets or IPs.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`

For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`

For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry. | ```yaml --- @@ -122,12 +101,12 @@ To configure the network with RESTRICTED access type, complete this procedure. metadata: name: autonomousdatabase-sample spec: + action: Update details: autonomousDatabaseOCID: ocid1.autonomousdatabase... networkAccess: # Restrict access by defining access control rules in an Access Control List (ACL). - accessType: RESTRICTED - accessControlList: + whitelistedIps: - 1.1.1.1 - 1.1.0.0/16 - ocid1.vcn... @@ -141,23 +120,22 @@ To configure the network with RESTRICTED access type, complete this procedure. 2. Apply the yaml: ```sh - kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured -### Autonomous Database with PRIVATE access type on shared Exadata infrastructure +## Configure Network Access with Private Endpoint Access Only -To configure the network with PRIVATE access type, complete this procedure +To change the Network Access to Private Endpoint Access Only, complete this procedure -1. Visit [Overview of VCNs and Subnets](https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingVCNs_topic-Overview_of_VCNs_and_Subnets.htm#console) and [Network Security Groups](https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/networksecuritygroups.htm#working) to see how to create VCNs, subnets, and network security groups (NSGs) if you haven't created them yet. The subnet and the NSG has to be in the same VCN. +1. 
Visit [Overview of VCNs and Subnets](https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingVCNs_topic-Overview_of_VCNs_and_Subnets.htm#console) and [Network Security Groups](https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/networksecuritygroups.htm#working) to see how to create VCNs, subnets, and network security groups (NSGs) if you haven't already created them. The subnet and the NSG must be in the same VCN. 2. Copy and paste the OCIDs of the subnet and NSG to the corresponding parameters. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): | Attribute | Type | Description | Required? | |----|----|----|----| - | `networkAccess.accessType` | string | An enumeration (enum) value that defines how the database can be accessed. The value can be PUBLIC, RESTRICTED or PRIVATE. See [Types of Network Access](#types-of-network-access) for more descriptions. | Yes | - | `networkAccess.privateEndpoint.subnetOCID` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.

**Subnet Restrictions:**
- For bare metal DB systems and for single node virtual machine DB systems, do not use a subnet that overlaps with 192.168.16.16/28.
- For Exadata and virtual machine 2-node RAC systems, do not use a subnet that overlaps with 192.168.128.0/20.
- For Autonomous Database, setting this will disable public secure access to the database.
These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet. | Yes | - | `networkAccess.privateEndpoint.nsgOCIDs` | string[] | A list of the [OCIDs](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network security groups (NSGs) that this resource belongs to. Setting this to an empty array after the list is created removes the resource from all NSGs. For more information about NSGs, see [Security Rules](https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).

**NsgOCIDs restrictions:**
- Autonomous Databases with private access require at least 1 Network Security Group (NSG). The nsgOCIDs array cannot be empty. | Yes | - | `networkAccess.privateEndpoint.hostnamePrefix` | string | The hostname prefix for the resource. | No | + | `subnetId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.

**Subnet Restrictions:**
- For bare metal DB systems and for single-node virtual machine DB systems, do not use a subnet that overlaps with 192.168.16.16/28.
- For Exadata and virtual machine 2-node Oracle RAC systems, do not use a subnet that overlaps with 192.168.128.0/20.
- For Autonomous Database, setting `subnetId` disables public secure access to the database.
These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet. | Yes | + | `nsgIds` | string[] | The list of [OCIDs](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) for the network security groups (NSGs) to which this resource belongs. Setting `nsgIds` to an empty list removes all resources from all NSGs. For more information about NSGs, see [Security Rule](https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).

**NsgIds restrictions:**
- A network security group (NSG) is optional for Autonomous Databases with private access. The nsgIds list can be empty. | No | + | `privateEndpointLabel` | string | The resource's private endpoint label.
- Setting the endpoint label to a non-empty string creates a private endpoint database.
- Resetting the endpoint label to an empty string, after the creation of the private endpoint database, changes the private endpoint database to a public endpoint database.
Setting the endpoint label to a non-empty string value updates the database to a new private endpoint database when the database is disabled and re-enabled.
This setting cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMTLSConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | ```yaml --- @@ -166,29 +144,29 @@ To configure the network with PRIVATE access type, complete this procedure metadata: name: autonomousdatabase-sample spec: + action: Update details: autonomousDatabaseOCID: ocid1.autonomousdatabase... - networkAccess: - # Assigns a private endpoint, private IP, and hostname to your database. - accessType: PRIVATE - privateEndpoint: - subnetOCID: ocid1.subnet... - nsgOCIDs: - - ocid1.networksecuritygroup... + subnetId: ocid1.subnet... + nsgIds: + - ocid1.networksecuritygroup... ociConfig: configMapName: oci-cred secretName: oci-privatekey ``` -### Allow both TLS and mutual TLS (mTLS) authentication of Autonomous Database on shared Exadata infrastructure +## Allowing TLS or Require Only Mutual TLS (mTLS) Authentication +You can choose either to require mTLS authentication and disallow TLS authentication, or allow both mTLS and TLS authentication. + +### Require mutual TLS (mTLS) authentication and Disallow TLS Authentication -If you are using either the RESTRICTED or the PRIVATE network access option, then you can choose whether to permit both TLS and mutual TLS (mTLS) authentication, or to permit only mTLS authentication. To change the mTLS authentication setting, complete the following steps: +To configure your Autonomous Database instance to require mTLS connections and disallow TLS connections, complete this procedure. 1. Add the following parameters to the specification. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_mtls.yaml`](./../../config/samples/adb/autonomousdatabase_update_mtls.yaml): | Attribute | Type | Description | Required? 
| |----|----|----|----| - | `networkAccess.isMTLSConnectionRequired` | boolean| Indicates whether the Autonomous Database requires mTLS connections. | Yes | + | `isMtlsConnectionRequired` | boolean| Indicates whether the Autonomous Database requires mTLS connections. | Yes | ```yaml --- @@ -197,10 +175,10 @@ If you are using either the RESTRICTED or the PRIVATE network access option, the metadata: name: autonomousdatabase-sample spec: + action: Update details: autonomousDatabaseOCID: ocid1.autonomousdatabase... - networkAccess: - isMTLSConnectionRequired: false + isMtlsConnectionRequired: true ociConfig: configMapName: oci-cred secretName: oci-privatekey @@ -209,20 +187,26 @@ If you are using either the RESTRICTED or the PRIVATE network access option, the 2. Apply the yaml: ```sh - kubectl apply -f config/samples/adb/autonomousdatabase_update_mtls.yaml + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_mtls.yaml autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured ``` -### Autonomous Database with access control list enabled on dedicated Exadata infrastructure +### Allow both TLS and mutual TLS (mTLS) authentication -To configure the network with RESTRICTED access type using an access control list (ACL), complete this procedure. +If your Autonomous Database instance is configured to allow only mTLS connections, then you can reconfigure the instance to permit both mTLS and TLS connections. When you reconfigure the instance to permit both mTLS and TLS, you can use both authentication types at the same time, so that connections are no longer restricted to require mTLS authentication. -1. Add the following parameters to the specification. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): +This option only applies to Autonomous Databases on shared Exadata infrastructure. 
You can permit TLS connections when network access type is configured by using one of the following options: + +* **Access Control Rules (ACLs)**: with ACLs defined. +* **Private Endpoint Access Only**: with a private endpoint defined. + +Complete this procedure to allow both TLS and mTLS authentication. + +1. Add the following parameters to the specification. An example file is availble here: [`config/samples/adb/autonomousdatabase_update_mtls.yaml`](./../../config/samples/adb/autonomousdatabase_update_mtls.yaml): | Attribute | Type | Description | Required? | |----|----|----|----| - | `networkAccess.isAccessControlEnabled` | boolean | Indicates if the database-level access control is enabled.

If disabled, then database access is defined by the network security rules.

If enabled, then database access is restricted to the IP addresses defined by the rules specified with the `accessControlList` property. While specifying `accessControlList` rules is optional, if database-level access control is enabled, and no rules are specified, then the database will become inaccessible. The rules can be added later by using the `UpdateAutonomousDatabase` API operation, or by using the edit option in console.

When creating a database clone, you should specify the access control setting that you want the clone database to use. By default, database-level access control will be disabled for the clone.
This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform. | Yes | - | `networkAccess.accessControlList` | []string | The client IP access control list (ACL). This feature is available for autonomous databases on [shared Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL can access the Autonomous Database instance.

For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a delimiter between the VCN-specific subnets or IPs.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`

For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`

For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry. | Yes | + | `isMtlsConnectionRequired` | boolean| Indicates whether the Autonomous Database requires mTLS connections. | Yes | ```yaml --- @@ -231,13 +215,47 @@ To configure the network with RESTRICTED access type using an access control lis metadata: name: autonomousdatabase-sample spec: + action: Update details: autonomousDatabaseOCID: ocid1.autonomousdatabase... - networkAccess: - isAccessControlEnabled: true - accessControlList: - - 1.1.1.1 - - 1.1.0.0/16 + isMtlsConnectionRequired: false + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the yaml: + + ```sh + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_mtls.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +## Autonomous Database with access control list enabled on dedicated Exadata infrastructure + +To configure the network access of Autonomous Database with access control list (ACL) on dedicated Exadata infrastructure, complete this procedure. + +1. Add the following parameters to the specification. An example file is available here: [`config/samples/adb/autonomousdatabase_update_network_access.yaml`](./../../config/samples/adb/autonomousdatabase_update_network_access.yaml): + + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `isAccessControlEnabled` | boolean | Indicates if the database-level access control is enabled.

If disabled, then database access is defined by the network security rules.

If enabled, then database access is restricted to the IP addresses defined by the rules specified with the `accessControlList` property. While specifying `accessControlList` rules is optional, if database-level access control is enabled, and no rules are specified, then the database will become inaccessible. The rules can be added later by using the `UpdateAutonomousDatabase` API operation, or by using the edit option in console.

When creating a database clone, you should specify the access control setting that you want the clone database to use. By default, database-level access control is disabled for the clone.
This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform. | Yes | + | `accessControlList` | []string | The client IP access control list (ACL). This feature is available for Autonomous Databases on [shared Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL can access the Autonomous Database instance.

For shared Exadata infrastructure, the access control list is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a delimiter between the VCN-specific subnets or IPs.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`

For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`

For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry. | Yes | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Update + details: + autonomousDatabaseOCID: ocid1.autonomousdatabase... + isAccessControlEnabled: true + accessControlList: + - 1.1.1.1 + - 1.1.0.0/16 ociConfig: configMapName: oci-cred secretName: oci-privatekey @@ -246,5 +264,5 @@ To configure the network with RESTRICTED access type using an access control lis 2. Apply the yaml: ```sh - kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml + $ kubectl apply -f config/samples/adb/autonomousdatabase_update_network_access.yaml autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured diff --git a/docs/adb/README.md b/docs/adb/README.md index 1b59c4d6..e8164697 100644 --- a/docs/adb/README.md +++ b/docs/adb/README.md @@ -1,14 +1,14 @@ # Managing Oracle Autonomous Databases with Oracle Database Operator for Kubernetes -Before you use the Oracle Database Operator for Kubernetes (the operator), ensure your system meets all of the Oracle Autonomous Database (ADB) Prerequisites [ADB_PREREQUISITES](./ADB_PREREQUISITES.md). +Before you use the Oracle Database Operator for Kubernetes (the operator), ensure that your system meets all of the Oracle Autonomous Database (ADB) Prerequisites [ADB_PREREQUISITES](./ADB_PREREQUISITES.md). -As indicated in the prerequisites (see above), to interact with OCI services, either the cluster has to be authorized using Principal Instance, or using the API Key Authentication by specifying the configMap and the secret under the `ociConfig` field. 
+As indicated in the prerequisites (see above), to interact with OCI services, either the cluster must be authorized using Principal Instance, or the cluster must be authorized using the API Key Authentication by specifying the configMap and the secret under the `ociConfig` field. ## Required Permissions -The operator must be given the required type of access in a policy written by an administrator to manage the Autonomous Databases. See [Let database and fleet admins manage Autonomous Databases](https://docs.oracle.com/en-us/iaas/Content/Identity/Concepts/commonpolicies.htm#db-admins-manage-adb) for sample Autonomous Database policies. +The operator must be given the required type of access in a policy written by an administrator to manage the Autonomous Databases. For examples of Autonomous Database policies, see: [Let database and fleet admins manage Autonomous Databases](https://docs.oracle.com/en-us/iaas/Content/Identity/Concepts/commonpolicies.htm#db-admins-manage-adb) -The permission to view the workrequests is also required, so that the operator will update the resources when the work is done. See [Viewing Work Requests](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengviewingworkrequests.htm#contengviewingworkrequests) for sample work request policies. +Permissions to view the work requests are also required, so that the operator can update the resources when the work is done. 
For example work request policies, see: [Viewing Work Requests](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengviewingworkrequests.htm#contengviewingworkrequests) ## Supported Features @@ -25,14 +25,15 @@ After you create the resource, you can use the operator to perform the following * [Download instance credentials (wallets)](#download-wallets) of an Autonomous Database * [Stop/Start/Terminate](#stopstartterminate) an Autonomous Database * [Delete the resource](#delete-the-resource) from the cluster +* [Clone](#clone-an-existing-autonomous-database) an existing Autonomous Database -To debug the Oracle Autonomous Databases with Oracle Database operator, see [Debugging and troubleshooting](#debugging-and-troubleshooting) +To debug the Oracle Autonomous Databases with Oracle Database Operator, see [Debugging and troubleshooting](#debugging-and-troubleshooting) ## Provision an Autonomous Database -Follow these steps to provision an Autonomous Database that will map objects in your cluster. +To provision an Autonomous Database that will map objects in your cluster, complete the following steps: -1. Get the `Compartment OCID`. +1. Obtain the `Compartment OCID`. Log in to the Cloud Console and click `Compartment`. @@ -66,24 +67,36 @@ Follow these steps to provision an Autonomous Database that will map objects in kubectl create secret generic admin-password --from-literal=admin-password='password_here' ``` -4. Add the following fields to the AutonomousDatabase resource definition. An example `.yaml` file is available here: [`config/samples/adb/autonomousdatabase_create.yaml`](./../../config/samples/adb/autonomousdatabase_create.yaml) +4. Add the following fields to the Autonomous Database resource definition. An example `.yaml` file is available here: [`config/samples/adb/autonomousdatabase_create.yaml`](./../../config/samples/adb/autonomousdatabase_create.yaml) | Attribute | Type | Description | Required? 
| |----|----|----|----| - | `spec.details.compartmentOCID` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment of the Autonomous Database. | Yes | + | `spec.details.compartmentId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment of the Autonomous Database. | Yes | | `spec.details.dbName` | string | The database name. The name must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. | Yes | | `spec.details.displayName` | string | The user-friendly name for the Autonomous Database. The name does not have to be unique. | Yes | - | `spec.details.cpuCoreCount` | int | The number of OCPU cores to be made available to the database. | Yes | - | `spec.details.adminPassword` | dictionary | The password for the ADMIN user. The password must be between 12 and 30 characters long, and must contain at least 1 uppercase, 1 lowercase, and 1 numeric character. It cannot contain the double quote symbol (") or the username "admin", regardless of casing.

Either `k8sSecret.name` or `ociSecret.ocid` must be provided. If both `k8sSecret.name` and `ociSecret.ocid` appear, the Operator reads the password from the K8s secret that `k8sSecret.name` refers to. | Yes | + | `spec.details.dbWorkload` | string | The Autonomous Database workload type. The following values are valid:
`OLTP` - indicates an Autonomous Transaction Processing database
`DW` - indicates an Autonomous Data Warehouse database
`AJD` - indicates an Autonomous JSON Database
`APEX` - indicates an Autonomous Database with the Oracle APEX Application Development workload type.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMtlsConnectionRequired, privateEndpointLabel, nsgIds, dbVersion, dbName, or isFreeTier. | No | + | `spec.details.licenseModel` | string | The Oracle license model that applies to the Oracle Autonomous Database. Bring your own license (BYOL) allows you to apply your current on-premises Oracle software licenses to equivalent, highly automated Oracle services in the cloud.License Included allows you to subscribe to new Oracle Database software licenses and the Oracle Database service. Note that when provisioning an [Autonomous Database on dedicated Exadata infrastructure](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html), this attribute must be null. It is already set at the Autonomous Exadata Infrastructure level. When provisioning an [Autonomous Database Serverless ](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html) database, if a value is not specified, the system defaults the value to `BRING_YOUR_OWN_LICENSE`. Bring your own license (BYOL) also allows you to select the DB edition using the optional parameter.
This cannot be updated in parallel with any of the following: cpuCoreCount, computeCount, dataStorageSizeInTBs, adminPassword, isMtlsConnectionRequired, dbWorkload, privateEndpointLabel, nsgIds, dbVersion, dbName, or isFreeTier. | No | + | `spec.details.dbVersion` | string | A valid Oracle Database version for Autonomous Database. | No | + | `spec.details.dataStorageSizeInTBs` | int | The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed. For Autonomous Databases on dedicated Exadata infrastructure, the maximum storage value is determined by the infrastructure shape. See Characteristics of [Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details. A full Exadata service is allocated when the Autonomous Database size is set to the upper limit (384 TB). | No | + | `spec.details.cpuCoreCount` | int | The number of CPU cores to be made available to the database. For Autonomous Databases on dedicated Exadata infrastructure, the maximum number of cores is determined by the infrastructure shape. See [Characteristics of Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details.
**Note:** This parameter cannot be used with the `ocpuCount` parameter. | Conditional | + | `spec.details.computeModel` | string | The compute model of the Autonomous Database. This is required if using the `computeCount` parameter. If using `cpuCoreCount` then it is an error to specify `computeModel` to a non-null value. ECPU compute model is the recommended model and OCPU compute model is legacy. | Conditional | + | `spec.details.computeCount` | float32 | The compute amount (CPUs) available to the database. Minimum and maximum values depend on the compute model and whether the database is an Autonomous Database Serverless instance or an Autonomous Database on Dedicated Exadata Infrastructure.
For an Autonomous Database Serverless instance, the 'ECPU' compute model requires a minimum value of one, for databases in the elastic resource pool and minimum value of two, otherwise. Required when using the `computeModel` parameter. When using `cpuCoreCount` parameter, it is an error to specify computeCount to a non-null value. Providing `computeModel` and `computeCount` is the preferred method for both OCPU and ECPU. | Conditional | + | `spec.details.ocpuCount` | float32 | The number of OCPU cores to be made available to the database.
The following points apply:
- For Autonomous Databases on Dedicated Exadata infrastructure, to provision less than 1 core, enter a fractional value in an increment of 0.1. For example, you can provision 0.3 or 0.4 cores, but not 0.35 cores. (Note that fractional OCPU values are not supported for Autonomous Database Serverless instances.)
- To provision 1 or more cores, you must enter an integer between 1 and the maximum number of cores available for the infrastructure shape. For example, you can provision 2 cores or 3 cores, but not 2.5 cores. This applies to an Autonomous Database Serverless instance or an Autonomous Database on Dedicated Exadata Infrastructure.
- For Autonomous Database Serverless instances, this parameter is not used.
For Autonomous Databases on Dedicated Exadata infrastructure, the maximum number of cores is determined by the infrastructure shape. See [Characteristics of Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details.
**Note:** This parameter cannot be used with the `cpuCoreCount` parameter. | Conditional | + | `spec.details.adminPassword` | dictionary | The password for the ADMIN user. The password must be between 12 and 30 characters long, and must contain at least 1 uppercase, 1 lowercase, and 1 numeric character. It cannot contain the double quote symbol (") or the username "admin", regardless of casing.

Either `k8sSecret.name` or `ociSecret.id` must be provided. If both `k8sSecret.name` and `ociSecret.id` appear, the Operator reads the password from the K8s secret that `k8sSecret.name` refers to. | Yes | | `spec.details.adminPassword.k8sSecret.name` | string | The **name** of the K8s Secret where you want to hold the password for the ADMIN user. | Conditional | - |`spec.details.adminPassword.ociSecret.ocid` | string | The **[OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)** of the [OCI Secret](https://docs.oracle.com/en-us/iaas/Content/KeyManagement/Tasks/managingsecrets.htm) where you want to hold the password for the ADMIN user. | Conditional | + |`spec.details.adminPassword.ociSecret.id` | string | The **[OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)** of the [OCI Secret](https://docs.oracle.com/en-us/iaas/Content/KeyManagement/Tasks/managingsecrets.htm) where you want to hold the password for the ADMIN user. | Conditional | | `spec.details.dataStorageSizeInTBs` | int | The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed. | Yes | | `spec.details.isAutoScalingEnabled` | boolean | Indicates if auto scaling is enabled for the Autonomous Database OCPU core count. The default value is `FALSE` | No | - | `spec.details.isDedicated` | boolean | True if the database is on dedicated [Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm). `spec.details.autonomousContainerDatabase.k8sACD.name` or `spec.details.autonomousContainerDatabase.ociACD.ocid` has to be provided if the value is true. | No | + | `spec.details.isDedicated` | boolean | True if the database is on dedicated [Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm). 
`spec.details.autonomousContainerDatabase.k8sACD.name` or `spec.details.autonomousContainerDatabase.ociACD.id` has to be provided if the value is true. | No | + | `spec.details.isFreeTier` | boolean | Indicates if this is an Always Free resource. The default value is false. Note that Always Free Autonomous Databases have 1 CPU and 20GB of memory. For Always Free databases, memory and CPU cannot be scaled.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMtlsConnectionRequired, privateEndpointLabel, nsgIds, dbVersion, or dbName. | No | + | `spec.details.isAccessControlEnabled` | boolean | Indicates if the database-level access control is enabled.
If disabled, database access is defined by the network security rules.
If enabled, database access is restricted to the IP addresses defined by the rules specified with the `whitelistedIps` property. While specifying `whitelistedIps` rules is optional, if database-level access control is enabled and no rules are specified, the database will become inaccessible.
When creating a database clone, the desired access control setting should be specified. By default, database-level access control will be disabled for the clone.
This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform. For Autonomous Database Serverless instances, `whitelistedIps` is used. | No | + | `spec.details.whitelistedIps` | []string | The client IP access control list (ACL). This feature is available for [Autonomous Database Serverless](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL may access the Autonomous Database instance.
If `arePrimaryWhitelistedIpsUsed` is 'TRUE' then Autonomous Database uses this primary's IP access control list (ACL) for the disaster recovery peer called `standbywhitelistedips`.
For Autonomous Database Serverless, this is an array of CIDR (classless inter-domain routing) notations for a subnet or VCN OCID (virtual cloud network Oracle Cloud ID).
Multiple IPs and VCN OCIDs should be separate strings separated by commas. However, if other configurations require multiple pieces of information, then each piece is connected with semicolon (;) as a delimiter.
Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`
For Exadata Cloud@Customer, this is an array of IP addresses or CIDR notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`
For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, isMtlsConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | + | `spec.details.subnetId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.
**Subnet Restrictions:**
- For Autonomous Database, setting this will disable public secure access to the database.
These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet. | No | + | `spec.details.nsgIds` | []string | The list of [OCIDs](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) for the network security groups (NSGs) to which this resource belongs. Setting this to an empty list removes all resources from all NSGs. For more information about NSGs, see [Security Rules](https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).
**NsgIds restrictions:**
- A network security group (NSG) is optional for Autonomous Databases with private access. The nsgIds list can be empty. | No | + | `spec.details.privateEndpointLabel` | string | The resource's private endpoint label.
- Setting the endpoint label to a non-empty string creates a private endpoint database.
- Resetting the endpoint label to an empty string, after the creation of the private endpoint database, changes the private endpoint database to a public endpoint database.
- Setting the endpoint label to a non-empty string value updates the database to a new private endpoint database when the database is disabled and re-enabled.

This setting cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMTLSConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | + | `spec.details.isMtlsConnectionRequired` | boolean | Specifies if the Autonomous Database requires mTLS connections. | No | | `spec.details.autonomousContainerDatabase.k8sACD.name` | string | The **name** of the K8s Autonomous Container Database resource | No | - | `spec.details.autonomousContainerDatabase.ociACD.ocid` | string | The Autonomous Container Database [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm). | No | + | `spec.details.autonomousContainerDatabase.ociACD.id` | string | The Autonomous Container Database [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm). | No | | `spec.details.freeformTags` | dictionary | Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tag](https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).

Example:
`freeformTags:`
    `key1: value1`
    `key2: value2`| No | - | `spec.details.dbWorkload` | string | The Oracle Autonomous Database workload type. The following values are valid:
- OLTP - indicates an Autonomous Transaction Processing database
- DW - indicates an Autonomous Data Warehouse database
- AJD - indicates an Autonomous JSON Database
- APEX - indicates an Autonomous Database with the Oracle APEX Application Development workload type. | No | - | `spec.details.dbVersion` | string | A valid Oracle Database release for Oracle Autonomous Database. | No | | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | @@ -95,8 +108,9 @@ Follow these steps to provision an Autonomous Database that will map objects in metadata: name: autonomousdatabase-sample spec: + action: Create details: - compartmentOCID: ocid1.compartment... + compartmentId: ocid1.compartment... dbName: NewADB displayName: NewADB cpuCoreCount: 1 @@ -111,7 +125,7 @@ Follow these steps to provision an Autonomous Database that will map objects in 5. Choose the type of network access (optional): - By default, the network access type is set to PUBLIC, which allows secure connections from anywhere. Uncomment the code block if you want configure the network access. See [Configuring Network Access of Autonomous Database](./NETWORK_ACCESS_OPTIONS.md) for more information. + By default, the network access type is set to PUBLIC, which allows secure connections from anywhere. Uncomment the code block if you want configure the network access. For more information, see: [Configuring Network Access of Autonomous Database](./NETWORK_ACCESS_OPTIONS.md) 6. Apply the YAML: @@ -142,7 +156,7 @@ The operator also generates the `AutonomousBackup` custom resources if a databas 3. Add the following fields to the AutonomousDatabase resource definition. 
An example `.yaml` file is available here: [`config/samples/adb/autonomousdatabase_bind.yaml`](./../../config/samples/adb/autonomousdatabase_bind.yaml) | Attribute | Type | Description | Required? | |----|----|----|----| - | `spec.details.autonomousDatabaseOCID` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Database you want to bind (create a reference) in your cluster. | Yes | + | `spec.details.id` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Database that you want to bind (create a reference) in your cluster. | Yes | | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | @@ -154,8 +168,9 @@ The operator also generates the `AutonomousBackup` custom resources if a databas metadata: name: autonomousdatabase-sample spec: + action: Sync details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... ociConfig: configMapName: oci-cred secretName: oci-privatekey @@ -170,7 +185,7 @@ The operator also generates the `AutonomousBackup` custom resources if a databas ## Scale the OCPU core count or storage -> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. To use this example, either the provision operation or the bind operation must be done, and the operator is authorized with API Key Authentication. 
+> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. To use this example, either the provision operation or the bind operation must be completed, and the operator must be authorized with API Key Authentication. You can scale up or scale down the Oracle Autonomous Database OCPU core count or storage by updating the `cpuCoreCount` and `dataStorageSizeInTBs` parameters. The `isAutoScalingEnabled` indicates whether auto scaling is enabled. In this example, the CPU count and storage size (TB) are scaled up to 2 and the auto-scaling is turned off by updating the `autonomousdatabase-sample` custom resource. @@ -183,8 +198,9 @@ You can scale up or scale down the Oracle Autonomous Database OCPU core count or metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... cpuCoreCount: 2 dataStorageSizeInTBs: 2 isAutoScalingEnabled: false @@ -215,8 +231,9 @@ You can rename the database by changing the values of the `dbName` and `displayN metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... dbName: RenamedADB displayName: RenamedADB ociConfig: @@ -257,8 +274,9 @@ You can rename the database by changing the values of the `dbName` and `displayN metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... adminPassword: k8sSecret: name: new-admin-password @@ -301,13 +319,14 @@ A client Wallet is required to connect to a shared Oracle Autonomous Database. U metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - wallet: - name: instance-wallet - password: - k8sSecret: - name: instance-wallet-password + id: ocid1.autonomousdatabase... 
+ wallet: + name: instance-wallet + password: + k8sSecret: + name: instance-wallet-password ociConfig: configMapName: oci-cred secretName: oci-privatekey @@ -339,12 +358,12 @@ To use the secret in a deployment, refer to [Using Secrets](https://kubernetes.i > Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. -To start, stop, or terminate a database, use the `lifecycleState` attribute. -Here's a list of the values you can set for `lifecycleState`: +To start, stop, or terminate a database, use the `action` attribute. +Here's a list of the values you can set for `action`: -* `AVAILABLE`: to start the database -* `STOPPED`: to stop the database -* `TERMINATED`: to terminate the database +* `Start`: to start the database +* `Stop`: to stop the database +* `Terminate`: to terminate the database 1. An example .yaml file is available here: [config/samples/adb/autonomousdatabase_stop_start_terminate.yaml](./../../config/samples/adb/autonomousdatabase_stop_start_terminate.yaml) @@ -355,9 +374,9 @@ Here's a list of the values you can set for `lifecycleState`: metadata: name: autonomousdatabase-sample spec: + action: Stop details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... - lifecycleState: STOPPED + id: ocid1.autonomousdatabase... ociConfig: configMapName: oci-cred secretName: oci-privatekey @@ -387,8 +406,9 @@ To delete the resource and terminate the Autonomous Database, complete these ste metadata: name: autonomousdatabase-sample spec: + action: Update details: - autonomousDatabaseOCID: ocid1.autonomousdatabase... + id: ocid1.autonomousdatabase... hardLink: true ociConfig: configMapName: oci-cred @@ -411,6 +431,83 @@ To delete the resource and terminate the Autonomous Database, complete these ste Now, you can verify that the database is in TERMINATING state on the Cloud Console. 
+## Clone an existing Autonomous Database + +> Note: this operation requires an `AutonomousDatabase` object to be in your cluster. This example assumes the provision operation or the bind operation has been done by the users and the operator is authorized with API Key Authentication. + +To clone an existing Autonomous Database, complete these steps: + +1. Add the following fields to the AutonomousDatabase resource definition. An example YAML file is available here: [config/samples/adb/autonomousdatabase_clone.yaml](./../../config/samples/adb/autonomousdatabase_clone.yaml) + | Attribute | Type | Description | Required? | + |----|----|----|----| + | `spec.details.id` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the source Autonomous Database that you will clone to create a new Autonomous Database. | Yes | + | `spec.clone.cloneType` | string | The Autonomous Database clone type. Accepted values are: `FULL` and `METADATA`. | No | + | `spec.clone.compartmentId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment of the Autonomous Database. | Yes | + | `spec.clone.dbName` | string | The database name. The name must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters are not permitted. The database name must be unique in the tenancy. | Yes | + | `spec.clone.displayName` | string | The user-friendly name for the Autonomous Database. The name does not have to be unique. | Yes | + | `spec.clone.dbWorkload` | string | The Autonomous Database workload type. The following values are valid:
`OLTP` - indicates an Autonomous Transaction Processing database
`DW` - indicates an Autonomous Data Warehouse database
`AJD` - indicates an Autonomous JSON Database
`APEX` - indicates an Autonomous Database with the Oracle APEX Application Development workload type.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMtlsConnectionRequired, privateEndpointLabel, nsgIds, dbVersion, dbName, or isFreeTier. | No | + | `spec.clone.licenseModel` | string | The Oracle license model that applies to the Oracle Autonomous Database. Bring your own license (BYOL) allows you to apply your current on-premises Oracle software licenses to equivalent, highly automated Oracle services in the cloud.License Included allows you to subscribe to new Oracle Database software licenses and the Oracle Database service. Note that when provisioning an [Autonomous Database on dedicated Exadata infrastructure](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html), this attribute must be null. It is already set at the Autonomous Exadata Infrastructure level. When provisioning an [Autonomous Database Serverless ](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html) database, if a value is not specified, the system defaults the value to `BRING_YOUR_OWN_LICENSE`. Bring your own license (BYOL) also allows you to select the DB edition using the optional parameter.
This cannot be updated in parallel with any of the following: cpuCoreCount, computeCount, dataStorageSizeInTBs, adminPassword, isMtlsConnectionRequired, dbWorkload, privateEndpointLabel, nsgIds, dbVersion, dbName, or isFreeTier. | No | + | `spec.clone.dbVersion` | string | A valid Oracle Database version for Autonomous Database. | No | + | `spec.clone.dataStorageSizeInTBs` | int | The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed. For Autonomous Databases on dedicated Exadata infrastructure, the maximum storage value is determined by the infrastructure shape. See Characteristics of [Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details. A full Exadata service is allocated when the Autonomous Database size is set to the upper limit (384 TB). | No | + | `spec.clone.cpuCoreCount` | int | The number of CPU cores to be made available to the database. For Autonomous Databases on dedicated Exadata infrastructure, the maximum number of cores is determined by the infrastructure shape. See [Characteristics of Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details.
**Note:** This parameter cannot be used with the `ocpuCount` parameter. | Conditional | + | `spec.clone.computeModel` | string | The compute model of the Autonomous Database. This is required if using the `computeCount` parameter. If using `cpuCoreCount` then it is an error to specify `computeModel` to a non-null value. ECPU compute model is the recommended model and OCPU compute model is legacy. | Conditional | + | `spec.clone.computeCount` | float32 | The compute amount (CPUs) available to the database. Minimum and maximum values depend on the compute model and whether the database is an Autonomous Database Serverless instance or an Autonomous Database on Dedicated Exadata Infrastructure.
For an Autonomous Database Serverless instance, the 'ECPU' compute model requires a minimum value of one, for databases in the elastic resource pool and minimum value of two, otherwise. Required when using the `computeModel` parameter. When using `cpuCoreCount` parameter, it is an error to specify computeCount to a non-null value. Providing `computeModel` and `computeCount` is the preferred method for both OCPU and ECPU. | Conditional | + | `spec.clone.ocpuCount` | float32 | The number of OCPU cores to be made available to the database.
The following points apply:
- For Autonomous Databases on Dedicated Exadata infrastructure, to provision less than 1 core, enter a fractional value in an increment of 0.1. For example, you can provision 0.3 or 0.4 cores, but not 0.35 cores. (Note that fractional OCPU values are not supported for Autonomous Database Serverless instances.)
- To provision 1 or more cores, you must enter an integer between 1 and the maximum number of cores available for the infrastructure shape. For example, you can provision 2 cores or 3 cores, but not 2.5 cores. This applies to an Autonomous Database Serverless instance or an Autonomous Database on Dedicated Exadata Infrastructure.
- For Autonomous Database Serverless instances, this parameter is not used.
For Autonomous Databases on Dedicated Exadata infrastructure, the maximum number of cores is determined by the infrastructure shape. See [Characteristics of Infrastructure Shapes](https://www.oracle.com/pls/topic/lookup?ctx=en/cloud/paas/autonomous-database&id=ATPFG-GUID-B0F033C1-CC5A-42F0-B2E7-3CECFEDA1FD1) for shape details.
**Note:** This parameter cannot be used with the `cpuCoreCount` parameter. | Conditional | + | `spec.clone.adminPassword` | dictionary | The password for the ADMIN user. The password must be between 12 and 30 characters long, and must contain at least 1 uppercase, 1 lowercase, and 1 numeric character. It cannot contain the double quote symbol (") or the username "admin", regardless of casing.

Either `k8sSecret.name` or `ociSecret.id` must be provided. If both `k8sSecret.name` and `ociSecret.id` appear, the Operator reads the password from the K8s secret that `k8sSecret.name` refers to. | Yes | + | `spec.clone.adminPassword.k8sSecret.name` | string | The **name** of the K8s Secret where you want to hold the password for the ADMIN user. | Conditional | + |`spec.clone.adminPassword.ociSecret.id` | string | The **[OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)** of the [OCI Secret](https://docs.oracle.com/en-us/iaas/Content/KeyManagement/Tasks/managingsecrets.htm) where you want to hold the password for the ADMIN user. | Conditional | + | `spec.clone.dataStorageSizeInTBs` | int | The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed. | Yes | + | `spec.clone.isAutoScalingEnabled` | boolean | Indicates if auto scaling is enabled for the Autonomous Database OCPU core count. The default value is `FALSE` | No | + | `spec.clone.isDedicated` | boolean | True if the database is on dedicated [Exadata infrastructure](https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm). `spec.clone.autonomousContainerDatabase.k8sACD.name` or `spec.clone.autonomousContainerDatabase.ociACD.id` has to be provided if the value is true. | No | + | `spec.clone.isFreeTier` | boolean | Indicates if this is an Always Free resource. The default value is false. Note that Always Free Autonomous Databases have 1 CPU and 20GB of memory. For Always Free databases, memory and CPU cannot be scaled.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMtlsConnectionRequired, privateEndpointLabel, nsgIds, dbVersion, or dbName. | No | + | `spec.clone.isAccessControlEnabled` | boolean | Indicates if the database-level access control is enabled.
If disabled, database access is defined by the network security rules.
If enabled, database access is restricted to the IP addresses defined by the rules specified with the `whitelistedIps` property. While specifying `whitelistedIps` rules is optional, if database-level access control is enabled and no rules are specified, the database will become inaccessible.
When creating a database clone, the desired access control setting should be specified. By default, database-level access control will be disabled for the clone.
This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform. For Autonomous Database Serverless instances, `whitelistedIps` is used. | No | + | `spec.clone.whitelistedIps` | []string | The client IP access control list (ACL). This feature is available for [Autonomous Database Serverless](https://docs.oracle.com/en/cloud/paas/autonomous-database/index.html) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL may access the Autonomous Database instance.
If `arePrimaryWhitelistedIpsUsed` is 'TRUE' then Autonomous Database uses this primary's IP access control list (ACL) for the disaster recovery peer called `standbywhitelistedips`.
For Autonomous Database Serverless, this is an array of CIDR (classless inter-domain routing) notations for a subnet or VCN OCID (virtual cloud network Oracle Cloud ID).
Multiple IPs and VCN OCIDs should be separate strings separated by commas. However, if other configurations require multiple pieces of information, then each piece is connected with semicolon (;) as a delimiter.

Example: `["1.1.1.1","1.1.1.0/24","ocid1.vcn.oc1.sea.","ocid1.vcn.oc1.sea.;1.1.1.1","ocid1.vcn.oc1.sea.;1.1.0.0/16"]`
For Exadata Cloud@Customer, this is an array of IP addresses or CIDR notations.
Example: `["1.1.1.1","1.1.1.0/24","1.1.2.25"]`
For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry.
This cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, isMtlsConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | + | `spec.clone.subnetId` | string | The [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.
**Subnet Restrictions:**
- For Autonomous Database, setting this will disable public secure access to the database.
These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet. | No | + | `spec.clone.nsgIds` | []string | The list of [OCIDs](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) for the network security groups (NSGs) to which this resource belongs. Setting this to an empty list removes all resources from all NSGs. For more information about NSGs, see [Security Rules](https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).
**NsgIds restrictions:**
- A network security group (NSG) is optional for Autonomous Databases with private access. The nsgIds list can be empty. | No | + | `spec.clone.privateEndpointLabel` | string | The resource's private endpoint label.
- Setting the endpoint label to a non-empty string creates a private endpoint database.
- Resetting the endpoint label to an empty string, after the creation of the private endpoint database, changes the private endpoint database to a public endpoint database.
- Setting the endpoint label to a non-empty string value updates the database to a new private endpoint database when the database is disabled and re-enabled.

This setting cannot be updated in parallel with any of the following: licenseModel, cpuCoreCount, computeCount, computeModel, adminPassword, whitelistedIps, isMTLSConnectionRequired, dbWorkload, dbVersion, dbName, or isFreeTier. | No | + | `spec.clone.isMtlsConnectionRequired` | boolean | Specifies if the Autonomous Database requires mTLS connections. | No | + | `spec.clone.autonomousContainerDatabase.k8sACD.name` | string | The **name** of the K8s Autonomous Container Database resource | No | + | `spec.clone.autonomousContainerDatabase.ociACD.id` | string | The Autonomous Container Database [OCID](https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm). | No | + | `spec.clone.freeformTags` | dictionary | Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tag](https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).

Example:
`freeformTags:`
    `key1: value1`
    `key2: value2`| No | + | `spec.ociConfig` | dictionary | Not required when the Operator is authorized with [Instance Principal](./ADB_PREREQUISITES.md#authorized-with-instance-principal). Otherwise, you will need the values from the [Authorized with API Key Authentication](./ADB_PREREQUISITES.md#authorized-with-api-key-authentication) section. | Conditional | + | `spec.ociConfig.configMapName` | string | Name of the ConfigMap that holds the local OCI configuration | Conditional | + | `spec.ociConfig.secretName`| string | Name of the K8s Secret that holds the private key value | Conditional | + + ```yaml + --- + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: autonomousdatabase-sample + spec: + action: Clone + details: + id: ocid1.autonomousdatabase... + clone: + compartmentId: ocid1.compartment... OR ocid1.tenancy... + dbName: ClonedADB + displayName: ClonedADB + cpuCoreCount: 1 + adminPassword: + k8sSecret: + name: admin-password + dataStorageSizeInTBs: 1 + dbWorkload: OLTP + cloneType: METADATA + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + ``` + +2. Apply the yaml + + ```sh + kubectl apply -f config/samples/adb/autonomousdatabase_clone.yaml + autonomousdatabase.database.oracle.com/autonomousdatabase-sample configured + ``` + +Now, you can verify that a cloned database with name "ClonedADB" is being provisioned on the Cloud Console. 
+ ## Roles and Privileges requirements for Oracle Autonomous Database Controller Autonomous Database controller uses Kubernetes objects such as: diff --git a/docs/dbcs/README.md b/docs/dbcs/README.md index c8b8d5d9..2c06511c 100644 --- a/docs/dbcs/README.md +++ b/docs/dbcs/README.md @@ -1,12 +1,12 @@ -# Using the DB Operator DBCS Controller +# Using the DB Operator Oracle Base Database Service (OBDS) Controller -Oracle Cloud Infastructure (OCI) Oracle Base Database Cloud Service (BDBCS) provides single-node Database (DB) systems, deployed on virtual machines, and provides two-node Oracle Real Appliation Clusters (Oracle RAC) database systems on virtual machines. +Oracle Cloud Infrastructure (OCI) Oracle Base Database Service (OBDS) provides single-node Database (DB) systems, deployed on virtual machines, and provides two-node Oracle Real Application Clusters (Oracle RAC) database systems on virtual machines. -The single-node DB systems and Oracle RAC systems on virtual machines are [co-managed Oracle Database cloud solutions](https://docs.oracle.com/en-us/iaas/Content/Database/Concepts/overview.htm). To manage the lifecycle of an OCI DBCS system, you can use the OCI Console, the REST API, or the Oracle Cloud Infrastructure command-line interface (CLI). At the granular level, you can use the Oracle Database CLI (DBCLI), Oracle Enterprise Manager, or Oracle SQL Developer. +The single-node DB systems and Oracle RAC systems on virtual machines are [co-managed Oracle Database cloud solutions](https://docs.oracle.com/en-us/iaas/Content/Database/Concepts/overview.htm). To manage the lifecycle of an OCI OBDS system, you can use the OCI Console, the REST API, or the Oracle Cloud Infrastructure command-line interface (CLI). At the granular level, you can use the Oracle Database CLI (DBCLI), Oracle Enterprise Manager, or Oracle SQL Developer. 
-The Oracle DB Operator DBCS Controller is a feature of the Oracle DB Operator for Kubernetes (OraOperator) which uses OCI's BDBCS service to support lifecycle management of the database systems. +The Oracle DB Operator Oracle Base Database Service (OBDS) Controller is a feature of the Oracle DB Operator for Kubernetes (OraOperator) which uses OCI's Oracle Base Database Service (OBDS) to support lifecycle management of the database systems. -Note: Oracle Base Database Cloud Service (BDBCS) was previously known as Database Cloud Service (DBCS). +Note: Oracle Base Database Service (OBDS) was previously known as Database Cloud Service (DBCS). # Supported Database Editions and Versions @@ -22,55 +22,47 @@ Two-node Oracle RAC DB systems require Oracle Enterprise Edition - Extreme Perfo For standard provisioning of DB systems (using Oracle Automatic Storage Management (ASM) as your storage management software), the following database releases are supported: -- Oracle Database 21c +- Oracle Database 23ai - Oracle Database 19c -- Oracle Database 18c (18.0) -- Oracle Database 12c Release 2 (12.2) -- Oracle Database 12c Release 1 (12.1) -- Oracle Database 11g Release 2 (11.2) - For fast provisioning of single-node virtual machine database systems (using Logical Volume Manager as your storage management software), the following database releases are supported: -- Oracle Database 21c +- Oracle Database 23ai - Oracle Database 19c -- Oracle Database 18c -- Oracle Database 12c Release 2 (12.2) -# Oracle DB Operator DBCS Controller Deployment +# Oracle DB Operator Oracle Base Database Service (OBDS) Controller Deployment -To deploy OraOperator, use this [Oracle Database Operator for Kubernetes](https://github.com/oracle/oracle-database-operator/blob/main/README.md) step-by-step procedure. 
+To deploy Oracle Database Operator (`OraOperator`), use the [Oracle Database Operator for Kubernetes](https://github.com/oracle/oracle-database-operator/blob/main/README.md) step-by-step procedure. -After the Oracle Database Operator is deployed, you can see the DB operator pods running in the Kubernetes Cluster. As part of the OraOperator deployment, the DBCS Controller is deployed as a CRD (Custom Resource Definition). The following screen output is an example of such a deployment: -``` +After the Oracle Database Operator is deployed, you can see the DB operator pods running in the Kubernetes Cluster. As part of the `OraOperator` deployment, the OBDS Controller is deployed as a CRD (Custom Resource Definition). The following screen output is an example of such a deployment: +```bash [root@test-server oracle-database-operator]# kubectl get ns NAME STATUS AGE -cert-manager Active 2m5s -default Active 125d -kube-node-lease Active 125d -kube-public Active 125d -kube-system Active 125d -oracle-database-operator-system Active 17s <<<< namespace to deploy the Oracle Database Operator +cert-manager Active 33d +default Active 118d +kube-node-lease Active 118d +kube-public Active 118d +kube-system Active 118d +oracle-database-operator-system Active 10m <<<< namespace to deploy the Oracle Database Operator [root@test-server oracle-database-operator]# kubectl get all -n oracle-database-operator-system NAME READY STATUS RESTARTS AGE -pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 0 28s -pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 0 28s -pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 0 28s - +pod/oracle-database-operator-controller-manager-678f96f5f4-f4rhq 1/1 Running 0 10m +pod/oracle-database-operator-controller-manager-678f96f5f4-plxcp 1/1 Running 0 10m +pod/oracle-database-operator-controller-manager-678f96f5f4-qgcg8 1/1 Running 0 10m + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 
-service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 29s -service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 29s - +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.197.164 8443/TCP 11m +service/oracle-database-operator-webhook-service ClusterIP 10.96.35.62 443/TCP 11m + NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 29s - +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 11m + NAME DESIRED CURRENT READY AGE -replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 29s -[root@docker-test-server oracle-database-operator]# - +replicaset.apps/oracle-database-operator-controller-manager-6657bfc664 0 0 0 11m +replicaset.apps/oracle-database-operator-controller-manager-678f96f5f4 3 3 3 10m [root@test-server oracle-database-operator]# kubectl get crd NAME CREATED AT @@ -81,7 +73,7 @@ certificaterequests.cert-manager.io 2022-02-22T23:21:35Z certificates.cert-manager.io 2022-02-22T23:21:36Z challenges.acme.cert-manager.io 2022-02-22T23:21:36Z clusterissuers.cert-manager.io 2022-02-22T23:21:36Z -dbcssystems.database.oracle.com 2022-02-22T23:23:25Z <<<< CRD for DBCS Controller +dbcssystems.database.oracle.com 2022-02-22T23:23:25Z <<<< CRD for OBDS Controller issuers.cert-manager.io 2022-02-22T23:21:36Z orders.acme.cert-manager.io 2022-02-22T23:21:37Z shardingdatabases.database.oracle.com 2022-02-22T23:23:25Z @@ -89,56 +81,56 @@ singleinstancedatabases.database.oracle.com 2022-02-22T23:23:25Z ``` -# Prerequisites to deploy a DBCS system using Oracle DB Operator DBCS Controller +# Prerequisites to deploy a OBDS system using Oracle DB Operator OBDS Controller -Before you deploy a DBCS system in OCI using the Oracle DB Operator DBCS Controller, complete the following procedure. 
+Before you deploy an OBDS system in OCI using the Oracle DB Operator OBDS Controller, complete the following procedure. **CAUTION :** You must make the changes specified in this section before you proceed to the next section. -## 1. Create a Kubernetes Configmap. For example: We are creating a Kubernetes Configmap named `oci-cred` using the OCI account we are using as below: +## 1. Create a Kubernetes Configmap. In this example, we create a Kubernetes Configmap named `oci-cred` with the OCI account we are using: -``` +```bash kubectl create configmap oci-cred \ ---from-literal=tenancy=ocid1.tenancy.oc1..................67iypsmea \ ---from-literal=user=ocid1.user.oc1..aaaaaaaaxw3i...............ce6qzdrnmq \ ---from-literal=fingerprint=b2:7c:a8:d5:44:f5.....................:9a:55 \ +--from-literal=tenancy= \ +--from-literal=user= \ +--from-literal=fingerprint= \ --from-literal=region=us-phoenix-1 ``` ## 2. Create a Kubernetes secret `oci-privatekey` using the OCI Pem key taken from OCI console for the account you are using: -``` --- assuming the OCI Pem key to be "/root/.oci/oci_api_key.pem" +```bash +#---assuming the OCI Pem key to be "/root/.oci/oci_api_key.pem" kubectl create secret generic oci-privatekey --from-file=privatekey=/root/.oci/oci_api_key.pem ``` -## 3. Create a Kubernetes secret named `admin-password`; This passward must meet the minimum passward requirements for the OCI BDBCS Service. +## 3. Create a Kubernetes secret named `admin-password`; This password must meet the minimum password requirements for the OCI OBDS Service. For example: -``` --- assuming the passward has been added to a text file named "admin-password": +```bash +#-- assuming the password has been added to a text file named "admin-password": kubectl create secret generic admin-password --from-file=./admin-password -n default ``` -## 4. 
Create a Kubernetes secret named `tde-password`; this password must meet the minimum password requirements for the OCI OBDS Service. For example: -``` --- assuming the passward has been added to a text file named "tde-password": +```bash +# -- assuming the password has been added to a text file named "tde-password": kubectl create secret generic tde-password --from-file=./tde-password -n default ``` -## 5. Create an ssh key pair, and use its public key to create a Kubernetes secret named `oci-publickey`; the private key for this public key can be used later to access the DBCS system's host machine using ssh: +## 5. Create an SSH key pair, and use its public key to create a Kubernetes secret named `oci-publickey`; the private key for this public key can be used later to access the OBDS system's host machine using SSH: -``` -[root@test-server DBCS]# ssh-keygen -N "" -C "DBCS_System"-`date +%Y%m` -P "" +```bash +[root@test-server OBDS]# ssh-keygen -N "" -C "DBCS_System"-`date +%Y%m` -P "" Generating public/private rsa key pair. Enter file in which to save the key (/root/.ssh/id_rsa): Your identification has been saved in /root/.ssh/id_rsa. @@ -159,31 +151,35 @@ The key's randomart image is: +----[SHA256]-----+ -[root@test-server DBCS]# kubectl create secret generic oci-publickey --from-file=publickey=/root/DBCS/id_rsa.pub +[root@test-server OBDS]# kubectl create secret generic oci-publickey --from-file=publickey=/root/DBCS/id_rsa.pub ``` +# Use Cases to manage the lifecycle of an OCI OBDS System with Oracle DB Operator OBDS Controller +For more information about the multiple use cases available to you to deploy and manage the OCI OBDS Service-based database using the Oracle DB Operator OBDS Controller, review this list: +[1. Deploy a DB System using OCI OBDS Service with minimal parameters](./provisioning/dbcs_service_with_minimal_parameters.md) +[2. 
Binding to an existing OBDS System already deployed in OCI Oracle Base Database Service](./provisioning/bind_to_existing_dbcs_system.md) +[3. Scale UP the shape of an existing OBDS System](./provisioning/scale_up_dbcs_system_shape.md) +[4. Scale DOWN the shape of an existing OBDS System](./provisioning/scale_down_dbcs_system_shape.md) +[5. Scale UP the storage of an existing OBDS System](./provisioning/scale_up_storage.md) +[6. Update License type of an existing OBDS System](./provisioning/update_license.md) +[7. Terminate an existing OBDS System](./provisioning/terminate_dbcs_system.md) +[8. Create OBDS with All Parameters with Storage Management as LVM](./provisioning/dbcs_service_with_all_parameters_lvm.md) +[9. Create OBDS with All Parameters with Storage Management as ASM](./provisioning/dbcs_service_with_all_parameters_asm.md) +[10. Deploy a 2 Node RAC DB System using OCI OBDS Service](./provisioning/dbcs_service_with_2_node_rac.md) +[11. Create PDB to an existing OBDS System already deployed in OCI OBDS Service](./provisioning/create_pdb_to_existing_dbcs_system.md) +[12. Create OBDS with PDB in OCI](./provisioning/create_dbcs_with_pdb.md) +[13. Create OBDS with KMS Vault Encryption in OCI](./provisioning/create_dbcs_with_kms.md) +[14. Migrate to KMS vault from TDE Wallet password encryption of an existing OBDS System already deployed in OCI Base OBDS Service](./provisioning/migrate_to_kms.md) +[15. Clone DB System from Existing DB System in OCI OBDS Service](./provisioning/clone_from_existing_dbcs.md) +[16. Clone DB System from Backup of Existing DB System in OCI OBDS Service](./provisioning/clone_from_backup_dbcs.md) +[17. 
Clone DB System from Existing Database of DB System in OCI OBDS Service](./provisioning/clone_from_database.md) -# Use Cases to manage the lifecycle of an OCI DBCS System with Oracle DB Operator DBCS Controller - -For more informatoin about the multiple use cases available to you to deploy and manage the OCI BDBCS Service-based database using the Oracle DB Operator DBCS Controller, review this list: - -[1. Deploy a DB System using OCI BDBCS Service with minimal parameters](./provisioning/dbcs_service_with_minimal_parameters.md) -[2. Binding to an existing DBCS System already deployed in OCI BDBCS Service](./provisioning/bind_to_existing_dbcs_system.md) -[3. Scale UP the shape of an existing BDBCS System](./provisioning/scale_up_dbcs_system_shape.md) -[4. Scale DOWN the shape of an existing BDBCS System](./provisioning/scale_down_dbcs_system_shape.md) -[5. Scale UP the storage of an existing BDBCS System](./provisioning/scale_up_storage.md) -[6. Update License type of an existing BDBCS System](./provisioning/update_license.md) -[7. Terminate an existing BDBCS System](./provisioning/terminate_dbcs_system.md) -[8. Create BDBCS with All Parameters with Storage Management as LVM](./provisioning/dbcs_service_with_all_parameters_lvm.md) -[9. Create BDBCS with All Parameters with Storage Management as ASM](./provisioning/dbcs_service_with_all_parameters_asm.md) -[10. Deploy a 2 Node RAC DB System using OCI BDBCS Service](./provisioning/dbcs_service_with_2_node_rac.md) - -## Connecting to OCI DBCS database deployed using Oracle DB Operator DBCS Controller +## Connecting to OCI OBDS database deployed using Oracle DB Operator OBDS Controller -After you have deployed the OCI BDBCS database with the Oracle DB Operator DBCS Controller, you can connect to the database. To see how to connect and use the database, refer to the steps in [Database Connectivity](./provisioning/database_connection.md). 
+After you have deployed the OCI OBDS database with the Oracle DB Operator OBDS Controller, you can connect to the database. To see how to connect and use the database, refer to the steps in [Database Connectivity](./provisioning/database_connection.md). ## Known Issues -If you encounter any issues with deployment, refer to the list of [Known Issues](./provisioning/known_issues.md) for an OCI DBCS System deployed using Oracle DB Operator DBCS Controller. +If you encounter any issues with deployment, refer to the list of [Known Issues](./provisioning/known_issues.md) for an OCI OBDS System deployed using Oracle DB Operator OBDS Controller. diff --git a/docs/dbcs/provisioning/bind_to_existing_dbcs_system.md b/docs/dbcs/provisioning/bind_to_existing_dbcs_system.md index 6fcff5de..eced7538 100644 --- a/docs/dbcs/provisioning/bind_to_existing_dbcs_system.md +++ b/docs/dbcs/provisioning/bind_to_existing_dbcs_system.md @@ -1,32 +1,32 @@ -# Binding to an existing DBCS System already deployed in OCI DBCS Service +# Binding to an existing OBDS System already deployed in OCI Oracle Base Database Service -In this use case, we bind the Oracle DB Operator DBCS Controller to an existing OCI DBCS System which has already been deployed earlier. This will help to manage the life cycle of that DBCS System using the Oracle DB Operator DBCS Controller. +In this use case, we bind the Oracle DB Operator OBDS Controller to an existing OCI OBDS System which has already been deployed earlier. This will help to manage the life cycle of that OBDS System using the Oracle DB Operator OBDS Controller. **NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. 
-This example uses `bind_to_existing_dbcs_system.yaml` to bind to an existing DBCS VMDB using Oracle DB Operator DBCS Controller with: +This example uses `bind_to_existing_dbcs_system.yaml` to bind to an existing OBDS VMDB using Oracle DB Operator OBDS Controller with: -- OCI Configmap as `oci-cred` +- OCI Configmap as `oci-cred-mumbai` - OCI Secret as `oci-privatekey` -- OCID of the existing DBCS System as `ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa` +- OCID of the existing OBDS System as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` Use the file: [bind_to_existing_dbcs_system.yaml](./bind_to_existing_dbcs_system.yaml) for this use case as below: 1. Deploy the .yaml file: -```sh -[root@docker-test-server DBCS]# kubectl apply -f bind_dbcs.yaml +```bash +kubectl apply -f bind_to_existing_dbcs_system.yaml dbcssystem.database.oracle.com/dbcssystem-existing created ``` -2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB deployment. +2. Monitor the Oracle DB Leader Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. NOTE: Check the DB Operator Pod name in your environment. -``` -[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +```bash +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system ``` ## Sample Output -[Here](./bind_to_existing_dbcs_system_sample_output.log) is the sample output for binding to an existing DBCS System already deployed in OCI using Oracle DB Operator DBCS Controller. 
+[Here](./bind_to_existing_dbcs_system_sample_output.log) is the sample output for binding to an existing OBDS System already deployed in OCI using Oracle DB Operator OBDS Controller. diff --git a/docs/dbcs/provisioning/bind_to_existing_dbcs_system.yaml b/docs/dbcs/provisioning/bind_to_existing_dbcs_system.yaml index 49647229..6ff24bc8 100644 --- a/docs/dbcs/provisioning/bind_to_existing_dbcs_system.yaml +++ b/docs/dbcs/provisioning/bind_to_existing_dbcs_system.yaml @@ -1,8 +1,8 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-existing spec: - id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" ociConfigMap: "oci-cred" ociSecret: "oci-privatekey" diff --git a/docs/dbcs/provisioning/bind_to_existing_dbcs_system_sample_output.log b/docs/dbcs/provisioning/bind_to_existing_dbcs_system_sample_output.log index 454a4452..f6505337 100644 --- a/docs/dbcs/provisioning/bind_to_existing_dbcs_system_sample_output.log +++ b/docs/dbcs/provisioning/bind_to_existing_dbcs_system_sample_output.log @@ -48,130 +48,61 @@ replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 2022-03-08T23:27:48.625Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} 2022-03-08T23:27:52.513Z INFO controller-runtime.manager.controller.dbcssystem Sync information from remote DbcsSystem System successfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing +[root@docker-test-server test]# kubectl describe 
dbcssystems.database.oracle.com dbcssystem-existing Name: dbcssystem-existing Namespace: default Labels: Annotations: lastSuccessfulSpec: - {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... -API Version: database.oracle.com/v1alpha1 + {"dbSystem":{"compartmentId":"","availabilityDomain":"","subnetId":"","shape":"","hostName":"","dbAdminPaswordSecret":"","dbBackupConfig":... +API Version: database.oracle.com/v4 Kind: DbcsSystem Metadata: - Creation Timestamp: 2022-03-08T23:27:48Z + Creation Timestamp: 2024-12-06T15:16:07Z Generation: 1 - Managed Fields: - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:id: - f:ociConfigMap: - f:ociSecret: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-03-08T23:27:48Z - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:lastSuccessfulSpec: - f:spec: - f:dbSystem: - .: - f:availabilityDomain: - f:compartmentId: - f:cpuCoreCount: - f:dbAdminPaswordSecret: - f:dbBackupConfig: - f:dbEdition: - f:dbName: - f:dbUniqueName: - f:dbVersion: - f:diskRedundancy: - f:displayName: - f:faultDomains: - f:hostName: - f:nodeCount: - f:shape: - f:sshPublicKeys: - f:subnetId: - f:status: - .: - f:availabilityDomain: - f:cpuCoreCount: - f:dataStoragePercentage: - f:dataStorageSizeInGBs: - f:dbEdition: - f:dbInfo: - f:displayName: - f:id: - f:licenseModel: - f:network: - .: - f:clientSubnet: - f:domainName: - f:hostName: - f:listenerPort: - f:scanDnsName: - f:vcnName: - f:nodeCount: - f:recoStorageSizeInGB: - f:shape: - f:state: - f:storageManagement: - f:subnetId: - f:timeZone: - f:workRequests: - Manager: manager - Operation: Update - Time: 2022-03-08T23:27:52Z - Resource Version: 55191827 - UID: 96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 + 
Resource Version: 116146012 + UID: 375b1bea-9b69-4b86-a2b1-fe7750608913 Spec: - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - Oci Config Map: oci-cred + Db System: + Availability Domain: + Compartment Id: + Db Admin Pasword Secret: + Db Backup Config: + Host Name: + Kms Config: + Shape: + Subnet Id: + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htya6crmqdnyz5h7ngpi4azbhndm6ssdmyn7yxk2uhbvxala + Kms Config: + Oci Config Map: oci-cred-mumbai Oci Secret: oci-privatekey Status: - Availability Domain: OLou:PHX-AD-1 - Cpu Core Count: 1 + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 2 Data Storage Percentage: 80 Data Storage Size In G Bs: 256 - Db Edition: ENTERPRISE_EDITION - Db Info: - Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq - Db Name: db0130 - Db Unique Name: db0130_phx1zn - Db Workload: OLTP - Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra - Display Name: dbsystem20220308221302 - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - License Model: LICENSE_INCLUDED + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htya6crmqdnyz5h7ngpi4azbhndm6ssdmyn7yxk2uhbvxala + License Model: BRING_YOUR_OWN_LICENSE Network: - Client Subnet: k8test-pubvcn - Domain Name: k8testpubvcn.k8test.oraclevcn.com - Host Name: host0130 + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 Listener Port: 1521 - Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com - Vcn Name: k8test + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db Node Count: 1 Reco Storage Size In GB: 256 - Shape: VM.Standard2.1 + Shape: VM.Standard.E5.Flex State: AVAILABLE Storage Management: ASM - Subnet 
Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq Time Zone: UTC Work Requests: - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljro3fhuxevjwxlue5gqq63q7rd7uhub2ru6gd6ay6k35f4hdeqqxkq Operation Type: Create DB System Percent Complete: 100 - Time Accepted: 2022-03-08 22:13:02.999 +0000 UTC - Time Finished: 2022-03-08 23:11:50.46 +0000 UTC - Time Started: 2022-03-08 22:13:16.995 +0000 UTC -Events: -[root@docker-test-server test]# \ No newline at end of file + Time Accepted: 2024-12-06 12:12:04.031 +0000 UTC + Time Finished: 2024-12-06 13:01:20.457 +0000 UTC + Time Started: 2024-12-06 12:12:11.041 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system.yaml b/docs/dbcs/provisioning/clone_dbcs_system.yaml new file mode 100644 index 00000000..fd6cc1d4 --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system.yaml @@ -0,0 +1,20 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-clone + namespace: default +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyaqui4hoqdyzmzl65jwkncyp3bnohengniqienetsdzw2q" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + setupDBCloning: true + dbClone: + dbAdminPasswordSecret: "admin-password" + dbName: "db1212" + hostName: "host1213" + displayName: "dbsystem01312" + licenseModel: "BRING_YOUR_OWN_LICENSE" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_from_backup.yaml b/docs/dbcs/provisioning/clone_dbcs_system_from_backup.yaml new file mode 100644 index 
00000000..54280af9 --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_from_backup.yaml @@ -0,0 +1,22 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-clone + namespace: default +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + setupDBCloning: true + dbBackupId: "ocid1.dbbackup.oc1.ap-mumbai-1.anrg6ljrabf7htyaae3fmnpacavkuwt2zqaj5q3gol2g6m6tirriveytoarq" + dbClone: + dbAdminPasswordSecret: "admin-password" + tdeWalletPasswordSecret: "tde-password" + dbName: "db1212" + hostName: "host1213" + displayName: "dbsystem01312" + licenseModel: "BRING_YOUR_OWN_LICENSE" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + initialDataStorageSizeInGB: 256 \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_from_backup_sample_output.log b/docs/dbcs/provisioning/clone_dbcs_system_from_backup_sample_output.log new file mode 100644 index 00000000..82531993 --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_from_backup_sample_output.log @@ -0,0 +1,75 @@ +2024-09-18T12:55:33Z INFO Starting the clone process for DBCS from backup {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df", "dbcs": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-clone"}} +2024-09-18T12:55:33Z INFO Retrieved existing Db System Details from OCI using Spec.Id {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": 
"a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:55:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:56:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:57:43Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:58:44Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T12:59:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:00:46Z INFO DB System current state is still:PROVISIONING. 
Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:01:47Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:02:47Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:03:48Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:04:49Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:05:50Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:06:51Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:07:52Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:08:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:09:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:10:54Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:11:55Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:12:56Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:13:57Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:14:58Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:15:59Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:17:00Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:18:01Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:19:02Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:20:02Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:21:03Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:22:05Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:23:05Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:24:06Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:25:08Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:26:08Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:27:09Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:28:10Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:29:11Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:30:12Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:31:13Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:32:14Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:33:15Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:34:16Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:35:16Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:36:17Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:37:18Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:38:19Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:39:20Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:40:21Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:41:22Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:42:23Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:43:23Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:44:24Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:45:25Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:46:26Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:47:27Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:48:28Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:49:29Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:50:30Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:51:31Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:52:32Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:53:32Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:54:33Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:55:34Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:56:35Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:57:36Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:58:37Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T13:59:38Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:00:39Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:01:40Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:02:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:03:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:04:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:05:43Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:06:44Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:07:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} +2024-09-18T14:08:46Z INFO DB Cloning completed successfully from provided backup DB system. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "a299bb7c-22eb-4db4-9037-ce81897345df"} \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_from_database.yaml b/docs/dbcs/provisioning/clone_dbcs_system_from_database.yaml new file mode 100644 index 00000000..40767739 --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_from_database.yaml @@ -0,0 +1,22 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-clone + namespace: default +spec: + databaseId: "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyapxtsgw6hy3kyosmrawefq2csm4kjv4d5au7biuiaabsq" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + setupDBCloning: true + dbClone: + dbAdminPasswordSecret: "admin-password" + tdeWalletPasswordSecret: "tde-password" + dbName: "db1212" + hostName: "host1213" + displayName: "dbsystem01312" + licenseModel: "BRING_YOUR_OWN_LICENSE" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: 
"ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + initialDataStorageSizeInGB: 256 \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_from_database_sample_output.log b/docs/dbcs/provisioning/clone_dbcs_system_from_database_sample_output.log new file mode 100644 index 00000000..2881051d --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_from_database_sample_output.log @@ -0,0 +1,39 @@ +2024-09-19T19:23:08Z INFO Starting the clone process for Database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "4c2b2567-052a-4a27-ae96-e18f655577d1", "dbcs": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-clone"}} +2024-09-19T19:23:08Z INFO Retrieved passwords from Kubernetes secrets {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "4c2b2567-052a-4a27-ae96-e18f655577d1"} +2024-09-19T19:23:09Z INFO Retrieved existing Database details from OCI {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "4c2b2567-052a-4a27-ae96-e18f655577d1", "DatabaseId": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyapxtsgw6hy3kyosmrawefq2csm4kjv4d5au7biuiaabsq"} +2024-09-20T08:51:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:52:46Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:53:46Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:54:47Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:55:48Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:56:49Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:57:50Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:58:51Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T08:59:52Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:00:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:01:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:02:54Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:03:55Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:04:56Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:52:39Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:53:40Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:54:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:55:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:56:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:57:43Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:58:44Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T09:59:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database +.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone" +, "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T10:00:46Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} +2024-09-20T10:01:47Z INFO DB Cloning completed successfully from provided backup DB system {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone", "reconcileID": "3ea6b5b1-1196-4279-bf8f-9e663f9a5543"} \ No newline at end of file diff --git a/docs/dbcs/provisioning/clone_dbcs_system_sample_output.log b/docs/dbcs/provisioning/clone_dbcs_system_sample_output.log new file mode 100644 index 00000000..22d86e1e --- /dev/null +++ b/docs/dbcs/provisioning/clone_dbcs_system_sample_output.log @@ -0,0 +1,60 @@ +2024-09-17T11:40:26Z INFO Starting the clone process for DBCS {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", 
"reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3", "dbcs": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-clone.yaml"}} +2024-09-17T11:40:26Z INFO Retrieved passwords from Kubernetes secrets {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:40:26Z INFO Retrieved existing Db System Details from OCI using Spec.Id {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:40:33Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:41:33Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:42:34Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:43:35Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:44:36Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:45:37Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:46:38Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:47:39Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:48:40Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:49:41Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:50:42Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:51:43Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:52:44Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:53:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:54:45Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:55:46Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:56:47Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:57:48Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:58:49Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T11:59:50Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:00:51Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:01:51Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:02:52Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:03:53Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:04:54Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:05:55Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:06:56Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:07:57Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:08:58Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:09:59Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:11:00Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:12:01Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:13:01Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:14:02Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:15:03Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:16:04Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:17:05Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:18:06Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:19:07Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:20:08Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:21:08Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:22:09Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:23:10Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:24:11Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:25:12Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:26:13Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:27:14Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:28:15Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:29:16Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:30:16Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:31:17Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:32:18Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:33:19Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:34:20Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:35:21Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:36:22Z INFO DB System current state is still:PROVISIONING. Sleeping for 60 seconds. 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} +2024-09-17T12:36:22Z INFO DB Cloning completed successfully from provided db system {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-clone.yaml","namespace":"default"}, "namespace": "default", "name": "dbcssystem-clone.yaml", "reconcileID": "bf0c98c6-13b6-4c15-938a-3594cd4cf1f3"} diff --git a/docs/dbcs/provisioning/clone_from_backup_dbcs.md b/docs/dbcs/provisioning/clone_from_backup_dbcs.md new file mode 100644 index 00000000..4597cff7 --- /dev/null +++ b/docs/dbcs/provisioning/clone_from_backup_dbcs.md @@ -0,0 +1,36 @@ +# Clone DB System from Backup of Existing DB System in OCI Oracle Base Database System (OBDS) + +In this use case, an existing OCI OBDS system deployed earlier with the Backup is going to be cloned. + +In order to clone OBDS to an existing OBDS system using Backup, get the details of OCID of backup in OCI OBDS. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `clone_dbcs_system_from_backup.yaml` to clone a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- setupDBCloning: as `true` +- OCID of Backup DB as `dbBackupId` of existing OBDS system. 
+- Specification for DB Cloning as `dbClone`-> `dbAdminPasswordSecret`,`tdeWalletPasswordSecret`, `dbName`,`hostName`,`displayName`,`licenseModel`,`domain`,`sshPublicKeys`,`subnetId`, `initialDataStorageSizeInGB` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [clone_dbcs_system_from_backup.yaml](./clone_dbcs_system_from_backup.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f clone_dbcs_system_from_backup.yaml +dbcssystem.database.oracle.com/dbcssystem-clone created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB creation of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./clone_dbcs_system_from_backup_sample_output.log) is the sample output for cloning an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. diff --git a/docs/dbcs/provisioning/clone_from_database.md b/docs/dbcs/provisioning/clone_from_database.md new file mode 100644 index 00000000..05b294b5 --- /dev/null +++ b/docs/dbcs/provisioning/clone_from_database.md @@ -0,0 +1,35 @@ +# Clone DB System from Existing Database of DB System in OCI Oracle Base Database System (OBDS) + +In this use case, an existing OCI OBDS system deployed earlier with existing Database is going to be cloned in OCI Base OBDS Service using existing Database ID. + +As a prerequisite, get the details of the OCID of the database of an existing OBDS System which you want to clone. 
+ +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `clone_dbcs_system_from_database.yaml` to clone a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: +- OCID of existing database as `databaseId` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- setupDBCloning: as `true` +- Specification of dbClone as - Details of new DB system for cloning `dbAdminPasswordSecret`,`tdeWalletPasswordSecret`, `dbName`,`hostName`,`displayName`,`licenseModel`,`domain`,`sshPublicKeys`,`subnetId`, `initialDataStorageSizeInGB` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [clone_dbcs_system_from_database.yaml](./clone_dbcs_system_from_database.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f clone_dbcs_system_from_database.yaml +dbcssystem.database.oracle.com/dbcssystem-clone created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB creation of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./clone_dbcs_system_from_database_sample_output.log) is the sample output for cloning an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/clone_from_existing_dbcs.md b/docs/dbcs/provisioning/clone_from_existing_dbcs.md new file mode 100644 index 00000000..61665188 --- /dev/null +++ b/docs/dbcs/provisioning/clone_from_existing_dbcs.md @@ -0,0 +1,36 @@ +# Clone DB System from Existing DB System in OCI Oracle Base Database System (OBDS) + +In this use case, an existing OCI OBDS system deployed earlier is going to be cloned in OCI Oracle Base Database System (OBDS). It is a two-step operation. + +In order to clone OBDS to an existing OBDS system, get the OCID of the DB System ID you want to clone. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +This example uses `clone_dbcs_system.yaml` to clone a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `id` to be cloned. +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- setupDBCloning: as `true` +- Specification of DB System being cloned as `dbClone` -> `dbAdminPasswordSecret`, `dbName`,`hostName`,`displayName`,`licenseModel`,`domain`,`sshPublicKeys`,`subnetId`. These must be unique and new details for the new cloned DB system to be created. +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [clone_dbcs_system.yaml](./clone_dbcs_system.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server DBCS]# kubectl apply -f clone_dbcs_system.yaml +dbcssystem.database.oracle.com/dbcssystem-clone created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB creation of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. 
+ +``` +[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./clone_dbcs_system_sample_output.log) is the sample output for cloning an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller. diff --git a/docs/dbcs/provisioning/create_dbcs_with_kms.md b/docs/dbcs/provisioning/create_dbcs_with_kms.md new file mode 100644 index 00000000..97d912d4 --- /dev/null +++ b/docs/dbcs/provisioning/create_dbcs_with_kms.md @@ -0,0 +1,73 @@ +# Deploy an OBDS DB System along with KMS Vault Encryption in OCI + +In this use case, an OCI OBDS system is deployed using Oracle DB Operator OBDS controller along with KMS Vault configuration. + +**NOTE** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +## Prerequisites for KMS Vaults related to OBDS System +There is also another set of prerequisites for KMS Vaults, related to dynamic groups and policies. Please follow the instructions below. +1. Create a dynamic group with the rule `ALL {resource.compartment.id =` and give it a name. +2. Create a policy in your compartment for this dynamic group to allow database access to keys/vaults. 
+ +```txt +Allow dynamic-group <> to manage secret-family in compartment <> +Allow dynamic-group <> to manage instance-family in compartment <> +Allow dynamic-group <> to manage database-family in compartment <> +Allow dynamic-group <> to manage keys in compartment <> +Allow dynamic-group <> to manage vaults in compartment <> +``` + +E.g + +```txt +ALL {resource.compartment.id = 'ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a'} +``` +```txt +Allow dynamic-group db_dynamic_group to manage secret-family in compartment sauahuja +Allow dynamic-group db_dynamic_group to manage instance-family in compartment sauahuja +Allow dynamic-group db_dynamic_group to manage database-family in compartment sauahuja +Allow dynamic-group db_dynamic_group to manage keys in compartment sauahuja +Allow dynamic-group db_dynamic_group to manage vaults in compartment sauahuja +``` +3. Do also create KMS Vault and KMS Key in order to use it during OBDS provisioning. We are going to refer those variables (`vaultName`, `keyName`) in the yaml file. + +This example uses `dbcs_service_with_kms.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Name as `kmsdb` +- Oracle Database Software Image Version as `19c` +- Database Workload Type as Transaction Processing i.e. 
`OLTP` +- Database Hostname Prefix as `kmshost` +- Oracle VMDB Shape as `VM.Standard2.2` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- domain `subdda0b5eaa.cluster1.oraclevcn.com` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` +- KMS Vault Name as `dbvault` +- KMS Compartment Id as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- KMS Key Name as `dbkey` + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). While giving KMS Vault make sure not to pass TDE wallet password in DB creation as either of them can be only used for encryption. + +Use the file: [dbcs_service_with_kms.yaml](./dbcs_service_with_kms.yaml) for this use case as below: + +1. Deploy the .yaml file: +```bash +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_with_kms.yaml +dbcssystem.database.oracle.com/dbcssystem-create created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. + +NOTE: Check the DB Operator Pod name in your environment. + +```bash +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./dbcs_service_with_kms_sample_output.log) is the sample output for a OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with KMS configurations. 
diff --git a/docs/dbcs/provisioning/create_dbcs_with_pdb.md b/docs/dbcs/provisioning/create_dbcs_with_pdb.md new file mode 100644 index 00000000..d68a1991 --- /dev/null +++ b/docs/dbcs/provisioning/create_dbcs_with_pdb.md @@ -0,0 +1,55 @@ +# Deploy a OBDS DB System using OCI Oracle Base Database System (OBDS) alongwith PDB + +In this use case, an OCI OBDS system is deployed using Oracle DB Operator OBDS controller along with PDB configuration + +**NOTE** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +Also, create a Kubernetes secret `pdb-password` using the file: + +```bash +#---assuming the PDB password is in ./pdb-password file" + +kubectl create secret generic pdb-password --from-file=./pdb-password -n default +``` + +This example uses `dbcs_service_with_pdb.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Availability Domain for the OBDS VMDB as `OLou:US-ASHBURN-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Name as `dbsystem24` +- Oracle Database Software Image Version as `21c` +- Database Workload Type as Transaction Processing i.e. 
`OLTP` +- Database Hostname Prefix as `host24` +- Cpu Core Count as `1` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- domain `subd215df3e6.k8stest.oraclevcn.com` +- OCID of the Subnet as `ocid1.subnet.oc1.iad.aaaaaaaa3lmmxwsykn2jc2vphzpq6eoyoqtte3dpwg6s5fzfkti22ibol2ua` +- PDB Name as `pdb_sauahuja_11` +- TDE Wallet Password as `tde-password` +- PDB Admin Password as `pdb-password` + +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [dbcs_service_with_pdb.yaml](./dbcs_service_with_pdb.yaml) for this use case as below: + +1. Deploy the .yaml file: +```bash +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_with_pdb.yaml +dbcssystem.database.oracle.com/dbcssystem-create-with-pdb created +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. + +NOTE: Check the DB Operator Pod name in your environment. + +```bash +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./dbcs_service_with_pdb_sample_output.log) is the sample output for a OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with PDB configurations. diff --git a/docs/dbcs/provisioning/create_kms.md b/docs/dbcs/provisioning/create_kms.md new file mode 100644 index 00000000..43db7037 --- /dev/null +++ b/docs/dbcs/provisioning/create_kms.md @@ -0,0 +1,50 @@ +# Create and update KMS vault to an existing DBCS System already deployed in OCI Base DBCS Service + +In this use case, an existing OCI DBCS system deployed earlier is going to have KMS Vault created and update DBCS System in OCI. Its a 2 Step operation. 
+
+In order to create KMS Vaults to an existing DBCS system, the steps will be:
+
+1. Bind the existing DBCS System to DBCS Controller.
+2. Apply the change to create KMS Vaults.
+
+**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment.
+
+As step 1, first bind the existing DBCS System to DBCS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). After successful binding, it will show as below-
+```bash
+kubectl get dbcssystems
+NAME AGE
+dbcssystem-existing 3m33s
+```
+Before proceeding further, create the PDB Admin Password, which is going to be used as the name suggests.
+
+
+This example uses `dbcs_service_with_kms.yaml` to create KMS Vault to existing DBCS VMDB using Oracle DB Operator DBCS Controller with:
+
+- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htyag4akvoakzw4qk7cae55qyp7hlffbouozvyl5ngoputza`
+- OCI Configmap as `oci-cred`
+- OCI Secret as `oci-privatekey`
+- Existing `dbSystem` used before to create DBCS system.
+- kmsConfig - vaultName as "basdbvault" as an example.
+- kmsConfig - keyName as "dbvaultkey" as an example.
+- kmsConfig - compartmentId as "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" as an example.
+**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md).
+
+Use the file: [dbcs_service_with_kms.yaml](./dbcs_service_with_kms.yaml) for this use case as below:
+
+1. Deploy the .yaml file:
+```sh
+[root@docker-test-server DBCS]# kubectl apply -f dbcs_service_with_kms.yaml
+dbcssystem.database.oracle.com/dbcssystem-existing configured
+```
+
+2. 
Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB creation of KMS Vaults.
+
+NOTE: Check the DB Operator Pod name in your environment.
+
+```
+[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system
+```
+
+## Sample Output
+
+[Here](./createkms_in_existing_dbcs_system_sample_output.log) is the sample output for creation of KMS Vaults on an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller.
diff --git a/docs/dbcs/provisioning/create_pdb.md b/docs/dbcs/provisioning/create_pdb.md
new file mode 100644
index 00000000..610ccd41
--- /dev/null
+++ b/docs/dbcs/provisioning/create_pdb.md
@@ -0,0 +1,55 @@
+# Create PDB to an existing DBCS System
+
+In this use case, an existing OCI DBCS system deployed earlier is going to have PDB/PDBs created. It's a 2-step operation.
+
+In order to create PDBs to an existing DBCS system, the steps will be:
+
+1. Bind the existing DBCS System to DBCS Controller.
+2. Apply the change to create PDBs.
+
+**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment.
+
+As step 1, first bind the existing DBCS System to DBCS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). After successful binding, it will show as below-
+```bash
+kubectl get dbcssystems
+NAME AGE
+dbcssystem-existing 3m33s
+```
+Before proceeding further, create the PDB Admin Password, which is going to be used as the name suggests. 
+ +Create a Kubernetes secret `pdb-password` using the file: + +```bash +#---assuming the PDB password is in ./pdb-password file" + +kubectl create secret generic pdb-password --from-file=./pdb-password -n default +``` + +This example uses `createpdb_in_existing_dbcs_system_list.yaml` to scale up a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htyag4akvoakzw4qk7cae55qyp7hlffbouozvyl5ngoputza` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- TDE Wallet Password as `tde-password` +- PDB Admin Password as `pdb-password` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [createpdb_in_existing_dbcs_system_list.yaml](./createpdb_in_existing_dbcs_system_list.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server DBCS]# kubectl apply -f createpdb_in_existing_dbcs_system_list.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB creation of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./createpdb_in_existing_dbcs_system_list_sample_output.log) is the sample output for creation of PDBs on an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller. 
diff --git a/docs/dbcs/provisioning/create_pdb_to_existing_dbcs_system.md b/docs/dbcs/provisioning/create_pdb_to_existing_dbcs_system.md
new file mode 100644
index 00000000..d1c4ed5b
--- /dev/null
+++ b/docs/dbcs/provisioning/create_pdb_to_existing_dbcs_system.md
@@ -0,0 +1,55 @@
+# Create PDB to an existing OBDS System
+
+In this use case, an existing OCI OBDS system deployed earlier is going to have a PDB/many PDBs created. It's a 2-step operation.
+
+In order to create PDBs to an existing OBDS system, the steps will be:
+
+1. Bind the existing OBDS System to OBDS Controller.
+2. Apply the change to create PDBs.
+
+**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequisites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment.
+
+As step 1, first bind the existing OBDS System to OBDS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). After successful binding, it will show as below-
+```bash
+kubectl get dbcssystems
+NAME AGE
+dbcssystem-existing 3m33s
+```
+Before proceeding further, create the PDB Admin Password, which is going to be used as the name suggests. 
+ +Create a Kubernetes secret `pdb-password` using the file: + +```bash +#---assuming the PDB password is in ./pdb-password file" + +kubectl create secret generic pdb-password --from-file=./pdb-password -n default +``` + +This example uses `createpdb_in_existing_dbcs_system_list.yaml` to scale up a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htya55wz5vfil7ul3pkzpubnymp6zrp3fhgomv3fcdr2vtiq` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- TDE Wallet Password as `tde-password` +- PDB Admin Password as `pdb-password` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [createpdb_in_existing_dbcs_system_list.yaml](./createpdb_in_existing_dbcs_system_list.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f createpdb_in_existing_dbcs_system_list.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB creation of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./createpdb_in_existing_dbcs_system_list_sample_output.log) is the sample output for creation of PDBs on an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/createkms_in_existing_dbcs_system_sample_output.log b/docs/dbcs/provisioning/createkms_in_existing_dbcs_system_sample_output.log new file mode 100644 index 00000000..18ac916e --- /dev/null +++ b/docs/dbcs/provisioning/createkms_in_existing_dbcs_system_sample_output.log @@ -0,0 +1 @@ +# To be added \ No newline at end of file diff --git a/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list.yaml b/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list.yaml new file mode 100644 index 00000000..589ce0cf --- /dev/null +++ b/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list.yaml @@ -0,0 +1,27 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + pdbConfigs: + - pdbName: "pdb_sauahuja_sdk_13" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "Finance" + - pdbName: "pdb_sauahuja_sdk_14" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "HR" + - pdbName: "pdb_sauahuja_sdk_15" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "IT" \ No newline at end of file diff --git a/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list_sample_output.log b/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list_sample_output.log new file mode 100644 index 00000000..9bee73c8 --- /dev/null +++ b/docs/dbcs/provisioning/createpdb_in_existing_dbcs_system_list_sample_output.log @@ -0,0 +1,185 @@ +2024-08-15T14:14:55Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": 
"DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga"]} +2024-08-15T14:14:55Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-existing"}, "databaseIds:->": "ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-15T14:14:55Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_13"} +2024-08-15T14:14:55Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:14:55Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", 
"controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:14:55Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_13"} +2024-08-15T14:14:56Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_13", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq"} +2024-08-15T14:14:56Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:15:26Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": 
"PROVISIONING"} +2024-08-15T14:15:57Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:16:27Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:16:57Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:17:27Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "PROVISIONING"} +2024-08-15T14:17:57Z INFO Checking pluggable database status {"controller": 
"dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq", "Status": "AVAILABLE"} +2024-08-15T14:17:57Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_13", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahtowm4kb7rwjemwjnyyxy2nv525qqqpmjue2lua3rihq"} +2024-08-15T14:17:59Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga"]} +2024-08-15T14:17:59Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-existing"}, "databaseIds:->": 
"ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-15T14:17:59Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_14"} +2024-08-15T14:17:59Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:17:59Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:18:00Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_14"} +2024-08-15T14:18:00Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", 
"PDBName": "pdb_sauahuja_sdk_14", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq"} +2024-08-15T14:18:01Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:18:31Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:19:01Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:19:31Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": 
"ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:20:01Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:20:32Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "PROVISIONING"} +2024-08-15T14:21:02Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq", "Status": "AVAILABLE"} +2024-08-15T14:21:02Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_14", "PDBID": 
"ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyajgpwlaeyxj72m773xrqm6a4masvm5symlmine6v47llq"} +2024-08-15T14:21:03Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga"]} +2024-08-15T14:21:03Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v4", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-existing"}, "databaseIds:->": "ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-15T14:21:03Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_15"} +2024-08-15T14:21:03Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": 
"default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:21:03Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4"} +2024-08-15T14:21:04Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_15"} +2024-08-15T14:21:05Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_15", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla"} +2024-08-15T14:21:05Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "PROVISIONING"} +2024-08-15T14:21:35Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": 
{"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "PROVISIONING"} +2024-08-15T14:22:05Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "PROVISIONING"} +2024-08-15T14:22:36Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "PROVISIONING"} +2024-08-15T14:23:06Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla", "Status": "AVAILABLE"} +2024-08-15T14:23:06Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": 
"dbcssystem-existing", "reconcileID": "3ed0d8d1-0d9c-4163-8c71-347c441420b4", "PDBName": "pdb_sauahuja_sdk_15", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaq2s5xfn6jpkehpcoclcrzgksbxsj426dynsqyq7ajhla"} + + +# kubectl describe dbcssystems.database.oracle.com +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"","availabilityDomain":"","subnetId":"","shape":"","hostName":"","dbAdminPaswordSecret":"","dbBackupConfig":... +API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-12-10T10:54:17Z + Generation: 4 + Resource Version: 117823935 + UID: c9da1245-3582-4926-b311-c24d75e75003 +Spec: + Db System: + Availability Domain: + Compartment Id: + Db Admin Pasword Secret: + Db Backup Config: + Host Name: + Kms Config: + Shape: + Subnet Id: + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Config: + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey + Pdb Configs: + Freeform Tags: + Department: Finance + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_sdk_13 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password + Freeform Tags: + Department: HR + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_sdk_14 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password + Freeform Tags: + Department: IT + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_sdk_15 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 512 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Details Status: + License 
Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Listener Port: 1521 + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Freeform Tags: + Department: IT + Pdb Name: pdb_sauahuja_sdk_15 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyazfddwgjlmpm3tctcnmqe7zwefzghr4wttij6u4lhh7bq + Pdb Config Status: + Pdb Name: cdb1_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakgj4wuabus6z5kmalvob6r6b7vivkbsmmh7bjprzbuwa + Pdb Config Status: + Freeform Tags: + Department: Finance + Pdb Name: pdb_sauahuja_sdk_13 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakkcrbhf6cit3z2hbcvded5g2rc7r5obbxeax7dv527xq + Pdb Config Status: + Freeform Tags: + Department: HR + Pdb Name: pdb_sauahuja_sdk_14 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyaqnht5ctcopuntaj74ptum27tbdk5rouvnfq5f2y3eyna + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrtpnjzjidageolva6ytlzjfb2lqhbbrivm4lsb67xyjzyyke6bt4a + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2024-12-10 08:57:53.547 +0000 UTC + Time Finished: 2024-12-10 09:14:04.572 +0000 UTC + Time Started: 2024-12-10 08:57:57.588 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxg7gov22vlcbqbnxrkl7t7xkcfya6w6gvck344jdf5vtqgw5wzgq + Operation Type: Update DB System + Percent 
Complete: 100 + Time Accepted: 2024-12-10 08:57:43.701 +0000 UTC + Time Finished: 2024-12-10 09:14:22.705 +0000 UTC + Time Started: 2024-12-10 08:57:53.873 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrk2efvqjda2t7k5iaerahw7wcyz5dq2zev2k55gmq2gvsjkui7hxq + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 05:19:52.499 +0000 UTC + Time Finished: 2024-12-10 07:59:19.083 +0000 UTC + Time Started: 2024-12-10 05:19:55.747 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr4qmf6rdtcbrc5p2q7bev3igugtpgfbwc2laht22yyjzr2srrg7vq + Operation Type: Update DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 10:57:27.313 +0000 UTC + Time Finished: 2024-12-10 11:15:50.597 +0000 UTC + Time Started: 2024-12-10 10:57:45.242 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr2vehqv3vgrxr5mrmd6hoqxg2zr6m5eaunv3ip6bcrubcpvhudmia + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2024-12-10 10:57:44.95 +0000 UTC + Time Finished: 2024-12-10 11:15:40.364 +0000 UTC + Time Started: 2024-12-10 10:57:54.082 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr36bt7ot5oq3otch4bu2axn3azkicot4zuwgwmxeupxr4siisydja + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 2024-12-10 11:44:49.369 +0000 UTC + Time Finished: 2024-12-10 11:58:45.01 +0000 UTC + Time Started: 2024-12-10 11:44:55.544 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxdpmmaipuqke5yx3szyfnf2zwkfptz3jevlq3coicecfjihnm4kq + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 2024-12-10 11:44:55.255 +0000 UTC + Time Finished: 2024-12-10 11:58:25.229 +0000 UTC + Time Started: 2024-12-10 11:44:57.743 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_controller_parameters.md b/docs/dbcs/provisioning/dbcs_controller_parameters.md index 
82fc3dc2..96bedf30 100644 --- a/docs/dbcs/provisioning/dbcs_controller_parameters.md +++ b/docs/dbcs/provisioning/dbcs_controller_parameters.md @@ -8,7 +8,7 @@ This page has the details of the parameters to define the specs related to an op | ociSecret | Kubernetes Secret created using PEM Key for OCI account in the prerequisites steps. | Y | String | | | | availabilityDomain | Availability Domain of the OCI region where you want to provision the DBCS System. | Y | String | | Please refer to this link: https://docs.oracle.com/en-us/iaas/Content/General/Concepts/regions.htm | | compartmentId | OCID of the OCI Compartment. | Y | String | | | -| dbAdminPaswordSecret | Kubernetes Secret created for DB Admin Account in prerequisites steps. | Y | String | | A strong password for SYS, SYSTEM, and PDB Admin. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numbers, and two special characters. The special characters must be _, #, or -.| +| dbAdminPasswordSecret | Kubernetes Secret created for DB Admin Account in prerequisites steps. | Y | String | | A strong password for SYS, SYSTEM, and PDB Admin. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numbers, and two special characters. The special characters must be _, #, or -.| | autoBackupEnabled | Whether to enable automatic backup or not. | N | Boolean | | True or False | | autoBackupWindow | Time window selected for initiating automatic backup for the database system. There are twelve available two-hour time windows. | N | String | | Please refer to this link: https://docs.oracle.com/en-us/iaas/api/#/en/database/20160918/datatypes/DbBackupConfig | | recoveryWindowsInDays | Number of days between the current and the earliest point of recoverability covered by automatic backups. 
| N | Integer | | Minimum: 1 and Maximum: 60 | diff --git a/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.log b/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.log new file mode 100644 index 00000000..2405a90a --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.log @@ -0,0 +1,132 @@ +2025-01-10T14:30:21Z INFO Updating KMS details in Existing Database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} +2025-01-10T14:30:27Z INFO MigrateVaultKey request succeeded, waiting for database to reach the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} +2025-01-10T14:30:27Z INFO Starting to wait for the database to reach the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "DesiredState": "AVAILABLE", "Timeout": "2h0m0s"} +2025-01-10T14:30:27Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": 
"ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:30:27Z INFO Database not in the desired state yet, waiting... {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:31:28Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:31:28Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:32:29Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:32:29Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:33:30Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:33:30Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:34:31Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:34:31Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:35:32Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:35:32Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:36:33Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:36:33Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:37:34Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:37:34Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:38:35Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:38:35Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:30:21Z INFO Updating KMS details in Existing Database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} +2025-01-10T14:30:27Z INFO MigrateVaultKey request succeeded, waiting for database to reach the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} +2025-01-10T14:30:27Z INFO Starting to wait for the database to reach the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "DesiredState": "AVAILABLE", "Timeout": "2h0m0s"} +2025-01-10T14:30:27Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": 
"dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:30:27Z INFO Database not in the desired state yet, waiting... {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:31:28Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:31:28Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:32:29Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:32:29Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:33:30Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:33:30Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:34:31Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:34:31Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:35:32Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:35:32Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:36:33Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:36:33Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:37:34Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:37:34Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:38:35Z INFO Database State {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING"} +2025-01-10T14:38:35Z INFO Database not in the desired state yet, waiting... 
{"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "CurrentState": "UPDATING", "DesiredState": "AVAILABLE", "NextCheckIn": "1m0s"} +2025-01-10T14:40:37Z INFO Database reached the desired state {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc", "DatabaseID": "ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq", "State": "AVAILABLE"} +2025-01-10T14:40:39Z INFO KMS migration process completed successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "890fac49-f8c0-43a8-817b-fb94e96627cc"} + + +basedb/ $ kubectl describe dbcssystems.database.oracle.com/dbcssystem-existing +Name: dbcssystem-existing +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":""... 
+API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2025-01-10T14:29:37Z + Generation: 2 + Resource Version: 130979222 + UID: f7535120-dd4a-4cbc-9e29-b9f104904773 +Spec: + Db System: + Availability Domain: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Db Admin Password Secret: admin-password + Db Backup Config: + Host Name: + Kms Config: + Shape: + Subnet Id: + Tde Wallet Password Secret: tde-password + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyafdoaarkuhhxjfgjrzjtxpbcaycib3woadfmcz545mwua + Kms Config: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Key Name: dbkey + Vault Name: dbvault + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 2 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxia3s627hwjr36bix3dnh4dlbny22tzcmb2a3b4rcp74clq + Db Name: cdb12 + Db Unique Name: cdb12_hf8_bom + Db Workload: OLTP + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyavlxjvktbs6xko5fbutwdwb3wgr2mnisovycrisj7abxq + Display Name: dbsys123 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyafdoaarkuhhxjfgjrzjtxpbcaycib3woadfmcz545mwua + Kms Details Status: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Key Id: ocid1.key.oc1.ap-mumbai-1.fbtxxaolaaavw.abrg6ljr63rcu5h6lmaeux752pzmp334zihovh3n2acags6zt37emab34yba + Key Name: dbkey + Management Endpoint: https://fbtxxaolaaavw-management.kms.ap-mumbai-1.oraclecloud.com + Vault Id: ocid1.vault.oc1.ap-mumbai-1.fbtxxaolaaavw.abrg6ljrbjokn2fwhh36tqzyog4yjrth3mj2emxea4fxmzw6z35zlmh65p2a + Vault Name: dbvault + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client 
Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host01234 + Listener Port: 1521 + Scan Dns Name: host01234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Freeform Tags: + Created By: MAA_TEAM + TEST: test_case_provision + Pdb Name: PDB0123 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyafnt7gvokjw7cvzs6xjxw5nmlz6awzycqcnf57blcuefa + Reco Storage Size In GB: 256 + Shape: VM.Standard2.2 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljroc46ic555q2rfcwxg3srsbq4indueiuvj7tlziyy63uz3pvpe4ra + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2025-01-10 06:30:12.411 +0000 UTC + Time Finished: 2025-01-10 07:51:04.59 +0000 UTC + Time Started: 2025-01-10 06:30:20.62 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.yaml b/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.yaml new file mode 100644 index 00000000..922f7eeb --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_migrate_to_kms.yaml @@ -0,0 +1,16 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-existing +spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyafdoaarkuhhxjfgjrzjtxpbcaycib3woadfmcz545mwua" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + tdeWalletPasswordSecret: "tde-password" + kmsConfig: + vaultName: "dbvault" + compartmentId: 
"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + keyName: "dbkey" \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.md b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.md index 1cfbe006..b9ce6931 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.md +++ b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.md @@ -1,39 +1,39 @@ -# Deploy a 2 Node RAC DB System using OCI DBCS Service +# Deploy a 2 Node RAC DB System using OCI OBDS Service -In this use case, a 2 Node RAC OCI DBCS system is deployed using Oracle DB Operator DBCS controller using all the available parameters in the .yaml file being used during the deployment. The type of the Storage Management in this case is ASM. +In this use case, a 2 Node RAC OCI OBDS system is deployed using Oracle DB Operator OBDS controller using all the available parameters in the .yaml file being used during the deployment. The type of the Storage Management in this case is ASM. **NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. 
-This example uses `dbcs_service_with_2_node_rac.yaml` to deploy a 2 Node RAC VMDB using Oracle DB Operator DBCS Controller with: +This example uses `dbcs_service_with_2_node_rac.yaml` to deploy a 2 Node RAC VMDB using Oracle DB Operator OBDS Controller with: - OCI Configmap as `oci-cred` - OCI Secret as `oci-privatekey` -- Availability Domain for the DBCS VMDB as `OLou:PHX-AD-1` - Cluster Name as `maa-cluster` -- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` - Database Admin Credential as `admin-password` -- Enable flag for Automatic Backup for DBCS Database as `True` -- Auto Backup Window for DBCS Database as `SLOT_FOUR` +- Enable flag for Automatic Backup for OBDS Database as `True` +- Auto Backup Window for OBDS Database as `SLOT_FOUR` - Recovery Windows for Backup retention in days as `15` -- Oracle Database Edition as `ENTERPRISE_EDITION_EXTREME_PERFORMANCE` +- Oracle Database Edition as `STANDARD_EDITION` - Database Name as `db0130` -- Oracle Database Software Image Version as `21c` +- Oracle Database Software Image Version as `19c` - Database Workload Type as Transaction Processing i.e. 
`OLTP` - Redundancy of the ASM Disks as `EXTERNAL` -- Display Name for the DBCS System as `dbsystem0130` -- Database Hostname Prefix as `host0130` +- Display Name for the OBDS System as `dbsys123` +- Database Hostname Prefix as `host01234` - Initial Size of the DATA Storage in GB as `256` - License Model as `BRING_YOUR_OWN_LICENSE` -- Node count as `2` -- Name of the PDB to be created as `PDB0130` +- Name of the PDB to be created as `PDB0123` - Private IP explicitly assigned to be `10.0.1.99` -- Oracle VMDB Shape as `VM.Standard2.2` -- SSH Public key for the DBCS system being deployed as `oci-publickey` +- Node count as `2` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` - Storage Management type as `ASM` -- OCID of the Subnet as `ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a` -- Tag the DBCS system with two key value pairs as `"TEST": "test_case_provision"` and `"CreatedBy": "MAA_TEAM"` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbv` +- Tag the OBDS system with two key value pairs as `"TEST": "test_case_provision"` and `"CreatedBy": "MAA_TEAM"` - TDE Wallet Secret as `tde-password` -- Time Zone for the DBCS System as `Europe/Berlin` +- Time Zone for the OBDS System as `Europe/Berlin` **NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). @@ -46,14 +46,14 @@ Use the file: [dbcs_service_with_all_parameters_asm.yaml](./dbcs_service_with_2_ dbcssystem.database.oracle.com/dbcssystem-create configured ``` -2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB deployment. +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. 
NOTE: Check the DB Operator Pod name in your environment. ``` -[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system ``` ## Sample Output -[Here](./dbcs_service_with_2_node_rac_sample_output.log) is the sample output for a 2 Node RAC DBCS System deployed in OCI using Oracle DB Operator DBCS Controller with all parameters and with Storage Management as ASM. +[Here](./dbcs_service_with_2_node_rac_sample_output.log) is the sample output for a 2 Node RAC OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with all parameters and with Storage Management as ASM. diff --git a/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.yaml b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.yaml index 168cb427..3b4f35e3 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.yaml +++ b/docs/dbcs/provisioning/dbcs_service_with_2_node_rac.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-create @@ -6,21 +6,22 @@ spec: ociConfigMap: "oci-cred" ociSecret: "oci-privatekey" dbSystem: - availabilityDomain: "OLou:PHX-AD-1" + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" clusterName: "maa-cluster" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" - dbAdminPaswordSecret: "admin-password" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" dbBackupConfig: autoBackupEnabled: True autoBackupWindow: "SLOT_FOUR" recoveryWindowsInDays: 15 - dbEdition: "ENTERPRISE_EDITION_EXTREME_PERFORMANCE" - dbName: "db0130" - dbVersion: "21c" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "cdb12" 
+ displayName: "dbsys123" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" dbWorkload: "OLTP" diskRedundancy: "EXTERNAL" - displayName: "dbsystem0130" - hostName: "host0130" + hostName: "host01234" initialDataStorageSizeInGB: 256 licenseModel: "BRING_YOUR_OWN_LICENSE" nodeCount: 2 @@ -30,7 +31,7 @@ spec: sshPublicKeys: - "oci-publickey" storageManagement: "ASM" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" tags: "TEST": "test_case_provision" "CreatedBy": "MAA_TEAM" diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.md b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.md index 5ef94c30..7bd9abea 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.md +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.md @@ -1,38 +1,37 @@ -# Create DBCS with All Parameters with Storage Management as ASM +# Create OBDS with All Parameters with Storage Management as ASM -In this use case, the an OCI DBCS system is deployed using Oracle DB Operator DBCS controller using all the available parameters in the .yaml file being used during the deployment. The type of the Storage Management in this case is ASM. +In this use case, the an OCI OBDS system is deployed using Oracle DB Operator OBDS controller using all the available parameters in the .yaml file being used during the deployment. The type of the Storage Management in this case is ASM. **NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. 
-This example uses `dbcs_service_with_all_parameters_asm.yaml` to deploy a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: +This example uses `dbcs_service_with_all_parameters_asm.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: - OCI Configmap as `oci-cred` - OCI Secret as `oci-privatekey` -- Availability Domain for the DBCS VMDB as `OLou:PHX-AD-1` -- Cluster Name as `maa-cluster` -- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` - Database Admin Credential as `admin-password` -- Enable flag for Automatic Backup for DBCS Database as `True` -- Auto Backup Window for DBCS Database as `SLOT_FOUR` +- Enable flag for Automatic Backup for OBDS Database as `True` +- Auto Backup Window for OBDS Database as `SLOT_FOUR` - Recovery Windows for Backup retention in days as `15` - Oracle Database Edition as `STANDARD_EDITION` - Database Name as `db0130` -- Oracle Database Software Image Version as `21c` +- Oracle Database Software Image Version as `19c` - Database Workload Type as Transaction Processing i.e. 
`OLTP` - Redundancy of the ASM Disks as `EXTERNAL` -- Display Name for the DBCS System as `dbsystem0130` -- Database Hostname Prefix as `host0130` +- Display Name for the OBDS System as `dbsys123` +- Database Hostname Prefix as `host01234` - Initial Size of the DATA Storage in GB as `256` - License Model as `BRING_YOUR_OWN_LICENSE` -- Name of the PDB to be created as `PDB0130` +- Name of the PDB to be created as `PDB0123` - Private IP explicitly assigned to be `10.0.1.99` -- Oracle VMDB Shape as `VM.Standard2.1` -- SSH Public key for the DBCS system being deployed as `oci-publickey` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` - Storage Management type as `ASM` -- OCID of the Subnet as `ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a` -- Tag the DBCS system with two key value pairs as `"TEST": "test_case_provision"` and `"CreatedBy": "MAA_TEAM"` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbv` +- Tag the OBDS system with two key value pairs as `"TEST": "test_case_provision"` and `"CreatedBy": "MAA_TEAM"` - TDE Wallet Secret as `tde-password` -- Time Zone for the DBCS System as `Europe/Berlin` +- Time Zone for the OBDS System as `Europe/Berlin` **NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). @@ -41,18 +40,18 @@ Use the file: [dbcs_service_with_all_parameters_asm.yaml](./dbcs_service_with_al 1. Deploy the .yaml file: ```sh -[root@docker-test-server DBCS]# kubectl apply -f dbcs_service_with_all_parameters_asm.yaml +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_with_all_parameters_asm.yaml dbcssystem.database.oracle.com/dbcssystem-create created ``` -2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB deployment. +2. 
Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. NOTE: Check the DB Operator Pod name in your environment. ``` -[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system ``` ## Sample Output -[Here](./dbcs_service_with_all_parameters_asm_sample_output.log) is the sample output for a DBCS System deployed in OCI using Oracle DB Operator DBCS Controller with all parameters and with Storage Management as ASM. +[Here](./dbcs_service_with_all_parameters_asm_sample_output.log) is the sample output for a OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with all parameters and with Storage Management as ASM. diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.yaml b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.yaml index 1dcec54c..34811df7 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.yaml +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-create @@ -6,31 +6,32 @@ spec: ociConfigMap: "oci-cred" ociSecret: "oci-privatekey" dbSystem: - availabilityDomain: "OLou:PHX-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" - dbAdminPaswordSecret: "admin-password" + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + clusterName: "maa-cluster" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" dbBackupConfig: autoBackupEnabled: True autoBackupWindow: "SLOT_FOUR" 
recoveryWindowsInDays: 15 - dbEdition: "STANDARD_EDITION" - dbName: "db0130" - dbVersion: "21c" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "cdb12" + displayName: "dbsys123" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" dbWorkload: "OLTP" diskRedundancy: "EXTERNAL" - displayName: "dbsystem0130" - hostName: "host0130" + hostName: "host01234" initialDataStorageSizeInGB: 256 - licenseModel: "BRING_YOUR_OWN_LICENSE" - pdbName: "PDB0130" + pdbName: "PDB0123" privateIp: "10.0.1.99" - shape: "VM.Standard2.1" + shape: "VM.Standard2.2" sshPublicKeys: - "oci-publickey" storageManagement: "ASM" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" tags: "TEST": "test_case_provision" "CreatedBy": "MAA_TEAM" tdeWalletPasswordSecret: "tde-password" - timeZone: "Europe/Berlin" + timeZone: "Europe/Berlin" \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm_sample_output.log index a2fc6690..eec26016 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm_sample_output.log +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_asm_sample_output.log @@ -1,35 +1,36 @@ [root@docker-test-server test]# cat dbcs_service_with_all_parameters_asm.yaml -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-create spec: - ociConfigMap: "oci-cred" + ociConfigMap: "oci-cred-mumbai" ociSecret: "oci-privatekey" dbSystem: - availabilityDomain: "OLou:PHX-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" 
dbAdminPaswordSecret: "admin-password" dbBackupConfig: autoBackupEnabled: True autoBackupWindow: "SLOT_FOUR" recoveryWindowsInDays: 15 - dbEdition: "STANDARD_EDITION" - dbName: "db0130" - dbVersion: "21c" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "cdb12" + displayName: "dbsys123" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" dbWorkload: "OLTP" diskRedundancy: "EXTERNAL" - displayName: "dbsystem0130" - hostName: "host0130" + hostName: "host01234" initialDataStorageSizeInGB: 256 licenseModel: "BRING_YOUR_OWN_LICENSE" - pdbName: "PDB0130" + pdbName: "PDB0123" privateIp: "10.0.1.99" - shape: "VM.Standard2.1" + shape: "VM.Standard.E5.Flex" sshPublicKeys: - "oci-publickey" storageManagement: "ASM" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" tags: "TEST": "test_case_provision" "CreatedBy": "MAA_TEAM" @@ -41,9 +42,6 @@ spec: dbcssystem.database.oracle.com/dbcssystem-create created - - - [root@docker-test-server test]# kubectl get ns kubectl get allNAME STATUS AGE @@ -145,169 +143,100 @@ replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 [root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-create +user/ $ k describe dbcssystems/dbcssystem-create Name: dbcssystem-create Namespace: default Labels: Annotations: lastSuccessfulSpec: - {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... -API Version: database.oracle.com/v1alpha1 + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... 
+API Version: database.oracle.com/v4 Kind: DbcsSystem Metadata: - Creation Timestamp: 2022-03-09T02:59:43Z - Generation: 1 - Managed Fields: - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:dbSystem: - .: - f:availabilityDomain: - f:compartmentId: - f:dbAdminPaswordSecret: - f:dbBackupConfig: - .: - f:autoBackupEnabled: - f:autoBackupWindow: - f:recoveryWindowsInDays: - f:dbEdition: - f:dbName: - f:dbVersion: - f:dbWorkload: - f:diskRedundancy: - f:displayName: - f:hostName: - f:initialDataStorageSizeInGB: - f:licenseModel: - f:pdbName: - f:privateIp: - f:shape: - f:sshPublicKeys: - f:storageManagement: - f:subnetId: - f:tags: - .: - f:CreatedBy: - f:TEST: - f:tdeWalletPasswordSecret: - f:timeZone: - f:ociConfigMap: - f:ociSecret: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-03-09T02:59:43Z - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:lastSuccessfulSpec: - f:spec: - f:id: - f:status: - .: - f:availabilityDomain: - f:cpuCoreCount: - f:dataStoragePercentage: - f:dataStorageSizeInGBs: - f:dbEdition: - f:dbInfo: - f:displayName: - f:id: - f:licenseModel: - f:network: - .: - f:clientSubnet: - f:domainName: - f:hostName: - f:listenerPort: - f:scanDnsName: - f:vcnName: - f:nodeCount: - f:recoStorageSizeInGB: - f:shape: - f:state: - f:storageManagement: - f:subnetId: - f:timeZone: - f:workRequests: - Manager: manager - Operation: Update - Time: 2022-03-09T03:59:22Z - Resource Version: 55276756 - UID: e7d874e7-3cd7-4b8b-8cd1-32d68795a38c + Creation Timestamp: 2024-12-09T09:42:08Z + Generation: 2 + Resource Version: 117337682 + UID: cc31eb51-56bc-48f5-926b-2453710b1592 Spec: Db System: - Availability Domain: OLou:PHX-AD-1 - Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya + Availability Domain: 
OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a Db Admin Pasword Secret: admin-password Db Backup Config: Auto Backup Enabled: true Auto Backup Window: SLOT_FOUR Recovery Windows In Days: 15 - Db Edition: STANDARD_EDITION - Db Name: db0130 - Db Version: 21c + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Name: cdb12 + Db Version: 19c Db Workload: OLTP Disk Redundancy: EXTERNAL - Display Name: dbsystem0130 - Host Name: host0130 + Display Name: dbsys123 + Host Name: host01234 Initial Data Storage Size In GB: 256 - License Model: BRING_YOUR_OWN_LICENSE - Pdb Name: PDB0130 - Private Ip: 10.0.1.99 - Shape: VM.Standard2.1 + Kms Config: + License Model: BRING_YOUR_OWN_LICENSE + Pdb Name: PDB0123 + Private Ip: 10.0.1.99 + Shape: VM.Standard.E5.Flex Ssh Public Keys: oci-publickey Storage Management: ASM - Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq Tags: Created By: MAA_TEAM TEST: test_case_provision Tde Wallet Password Secret: tde-password Time Zone: Europe/Berlin - Oci Config Map: oci-cred - Oci Secret: oci-privatekey + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyap33onviyojoimevpazf4wtbnfsi5v5izah2s365wmyka + Kms Config: + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey Status: - Availability Domain: OLou:PHX-AD-1 - Cpu Core Count: 1 + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 2 Data Storage Percentage: 80 Data Storage Size In G Bs: 256 - Db Edition: STANDARD_EDITION + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE Db Info: - Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqaubltt77vlwmsx7w5d5dvq6be7isglwbpqijfi5gflh5a - Db Name: db0130 - Db Unique Name: db0130_phx1sw + Db Home Id: 
ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiav4nm27oy6tfbqqyukvcgba7nalyozrgwfvkt5f25fazq + Db Name: cdb12 + Db Unique Name: cdb12_z4b_bom Db Workload: OLTP - Id: ocid1.database.oc1.phx.anyhqljrabf7htya5bzvoxrrc2qu6yjw6c27hcsx32bp7c76vzy35kesa2nq - Display Name: dbsystem0130 - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyaz42sxinatef6xieeppxmwg3bwlw5chpefc52s4joraxq - License Model: BRING_YOUR_OWN_LICENSE + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyanuajzgrh6u5qtvlui4e7jtfwbcnx7lcplw36dy4u4fza + Display Name: dbsys123 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyap33onviyojoimevpazf4wtbnfsi5v5izah2s365wmyka + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE Network: - Client Subnet: k8test-pubvcn - Domain Name: k8testpubvcn.k8test.oraclevcn.com - Host Name: host0130 - Listener Port: 1521 - Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com - Vcn Name: k8test - Node Count: 1 - Reco Storage Size In GB: 256 - Shape: VM.Standard2.1 - State: AVAILABLE - Storage Management: ASM - Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a - Time Zone: Europe/Berlin + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host01234 + Listener Port: 1521 + Scan Dns Name: host01234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Freeform Tags: + Created By: MAA_TEAM + TEST: test_case_provision + Pdb Name: PDB0123 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htya6wvrskrlk2fazy4pa25jcbinks7vsjdv4kxf5t6nxcxq + Reco Storage Size In GB: 256 + Shape: VM.Standard.E5.Flex + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC Work Requests: - Operation Id: 
ocid1.coreservicesworkrequest.oc1.phx.abyhqljrlpxe723pq3z5fkeyfgbu4ewsysjcdrxiyxigponwosy44uhcpcsq + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrq63rk37tfqyu64lwason4rczllxmd5nk5iovdzbqkkk2d4nwp5ka Operation Type: Create DB System Percent Complete: 100 - Time Accepted: 2022-03-09 02:59:48.969 +0000 UTC - Time Finished: 2022-03-09 03:56:52.77 +0000 UTC - Time Started: 2022-03-09 02:59:56.287 +0000 UTC -Events: -[root@docker-test-server test]# \ No newline at end of file + Time Accepted: 2024-12-09 09:42:14.521 +0000 UTC + Time Finished: 2024-12-09 10:32:30.77 +0000 UTC + Time Started: 2024-12-09 09:42:21.084 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.md b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.md index d6beeb16..1bc560f4 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.md +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.md @@ -1,35 +1,37 @@ -# Create DBCS with All Parameters with Storage Management as LVM +# Create OBDS with All Parameters with Storage Management as LVM -In this use case, the an OCI DBCS system is deployed using Oracle DB Operator DBCS controller using all the available parameters in the .yaml file being used during the deployment. +In this use case, the an OCI OBDS system is deployed using Oracle DB Operator OBDS controller using all the available parameters in the .yaml file being used during the deployment. **NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. 
-This example uses `dbcs_service_with_all_parameters_lvm.yaml` to deploy a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: +This example uses `dbcs_service_with_all_parameters_lvm.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: - OCI Configmap as `oci-cred` - OCI Secret as `oci-privatekey` -- Availability Domain for the DBCS VMDB as `OLou:PHX-AD-1` -- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` - Database Admin Credential as `admin-password` -- Enable flag for Automatic Backup for DBCS Database as `True` -- Auto Backup Window for DBCS Database as `SLOT_FOUR` +- Enable flag for Automatic Backup for OBDS Database as `True` +- Auto Backup Window for OBDS Database as `SLOT_FOUR` - Recovery Windows for Backup retention in days as `15` - Oracle Database Edition as `STANDARD_EDITION` - Database Name as `db0130` -- Oracle Database Software Image Version as `21c` +- Oracle Database Software Image Version as `19c` - Database Workload Type as Transaction Processing i.e. 
`OLTP` -- Display Name for the DBCS System as `dbsystem0130` -- Database Hostname Prefix as `host0130` +- Redundancy of the ASM Disks as `EXTERNAL` +- Display Name for the OBDS System as `dbsys123` +- Database Hostname Prefix as `host01234` - Initial Size of the DATA Storage in GB as `256` - License Model as `BRING_YOUR_OWN_LICENSE` -- Name of the PDB to be created as `PDB0130` -- Oracle VMDB Shape as `VM.Standard2.1` -- SSH Public key for the DBCS system being deployed as `oci-publickey` +- Name of the PDB to be created as `PDB0123` +- Private IP explicitly assigned to be `10.0.1.99` +- Oracle VMDB Shape as `VM.Standard2.1` +- SSH Public key for the OBDS system being deployed as `oci-publickey` - Storage Management type as `LVM` -- OCID of the Subnet as `ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a` -- Tag the DBCS system with two key value pairs as `"TEST": "test_case_provision"` and `"CreatedBy": "MAA_TEAM"` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbv` +- Tag the OBDS system with two key value pairs as `"TEST": "test_case_provision"` and `"CreatedBy": "MAA_TEAM"` - TDE Wallet Secret as `tde-password` -- Time Zone for the DBCS System as `Europe/Berlin` +- Time Zone for the OBDS System as `Europe/Berlin` **NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). @@ -38,18 +40,18 @@ Use the file: [dbcs_service_with_all_parameters_lvm.yaml](./dbcs_service_with_al 1. Deploy the .yaml file: ```sh -[root@docker-test-server DBCS]# kubectl apply -f dbcs_service_with_all_parameters_lvm.yaml +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_with_all_parameters_lvm.yaml dbcssystem.database.oracle.com/dbcssystem-create created ``` -2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB deployment. +2. 
Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB deployment. NOTE: Check the DB Operator Pod name in your environment. ``` -[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system ``` ## Sample Output -[Here](./dbcs_service_with_all_parameters_lvm_sample_output.log) is the sample output for a DBCS System deployed in OCI using Oracle DB Operator DBCS Controller with all parameters and with Storage Management as LVM. +[Here](./dbcs_service_with_all_parameters_lvm_sample_output.log) is the sample output for a OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with all parameters and with Storage Management as LVM. diff --git a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.yaml b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.yaml index 73208317..f76962d1 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.yaml +++ b/docs/dbcs/provisioning/dbcs_service_with_all_parameters_lvm.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-create @@ -6,27 +6,30 @@ spec: ociConfigMap: "oci-cred" ociSecret: "oci-privatekey" dbSystem: - availabilityDomain: "OLou:PHX-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" - dbAdminPaswordSecret: "admin-password" + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" dbBackupConfig: autoBackupEnabled: True autoBackupWindow: "SLOT_FOUR" recoveryWindowsInDays: 15 - dbEdition: 
"STANDARD_EDITION" - dbName: "db0130" - dbVersion: "21c" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "cdb12" + displayName: "dbsys123" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" dbWorkload: "OLTP" - displayName: "dbsystem0130" - hostName: "host0130" + diskRedundancy: "EXTERNAL" + hostName: "host01234" initialDataStorageSizeInGB: 256 licenseModel: "BRING_YOUR_OWN_LICENSE" - pdbName: "PDB0130" - shape: "VM.Standard2.1" + pdbName: "PDB0123" + privateIp: "10.0.1.99" + shape: "VM.Standard.E5.Flex" sshPublicKeys: - "oci-publickey" storageManagement: "LVM" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" tags: "TEST": "test_case_provision" "CreatedBy": "MAA_TEAM" diff --git a/docs/dbcs/provisioning/dbcs_service_with_kms.yaml b/docs/dbcs/provisioning/dbcs_service_with_kms.yaml new file mode 100644 index 00000000..691b17a1 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_kms.yaml @@ -0,0 +1,27 @@ +apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-create +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "kmsdb" + displayName: "kmsdbsystem" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "19c" + dbWorkload: "OLTP" + hostName: "kmshost" + shape: "VM.Standard2.2" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" + kmsConfig: + vaultName: "dbvault" + compartmentId: 
"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + keyName: "dbkey" \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_kms_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_kms_sample_output.log new file mode 100644 index 00000000..7ddf7d2f --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_kms_sample_output.log @@ -0,0 +1,91 @@ +kubectl describe dbcssystems.database.oracle.com/dbcssystem-create +Name: dbcssystem-create +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... +API Version: database.oracle.com/v4 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2025-01-09T18:10:30Z + Generation: 2 + Resource Version: 130640272 + UID: 85e39113-0a02-4cf6-84d8-2270c543b0bf +Spec: + Db System: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Db Admin Password Secret: admin-password + Db Backup Config: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Name: kmsdb + Db Version: 19c + Db Workload: OLTP + Display Name: kmsdbsystem + Domain: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: kmshost + Kms Config: + License Model: BRING_YOUR_OWN_LICENSE + Shape: VM.Standard2.2 + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyai52ll4ifn52jcwdwpvv2exqqfa2wptypvi46wibx5sea + Kms Config: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Key Name: dbkey + Vault Name: dbvault + Oci Config Map: oci-cred-mumbai + Oci Secret: oci-privatekey +Status: + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Cpu Core Count: 2 + Data Storage Percentage: 80 + Data 
Storage Size In G Bs: 256 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiasjpqkykdchfksgas4k62cqsf6p5gkvubsj53fdokovnq + Db Name: kmsdb + Db Unique Name: kmsdb_7cb_bom + Db Workload: OLTP + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyarhrmiqzf4pari5sshhdglj6bpuijy3fupxvveblr2l6q + Display Name: kmsdbsystem + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyai52ll4ifn52jcwdwpvv2exqqfa2wptypvi46wibx5sea + Kms Details Status: + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Key Id: ocid1.key.oc1.ap-mumbai-1.fbtxxaolaaavw.abrg6ljr63rcu5h6lmaeux752pzmp334zihovh3n2acags6zt37emab34yba + Key Name: dbkey + Management Endpoint: https://fbtxxaolaaavw-management.kms.ap-mumbai-1.oraclecloud.com + Vault Id: ocid1.vault.oc1.ap-mumbai-1.fbtxxaolaaavw.abrg6ljrbjokn2fwhh36tqzyog4yjrth3mj2emxea4fxmzw6z35zlmh65p2a + Vault Name: dbvault + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: kmshost + Listener Port: 1521 + Scan Dns Name: kmshost-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Pdb Name: kmsdb_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyaqbjqveoqzvn5dklbuc575xdrclsrkjt5juzzcelmuqla + Reco Storage Size In GB: 256 + Shape: VM.Standard2.2 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrcik5rtyygbv7qzzxqsmv6dvdwlfb7i2k3pitfqr2zomspcnkx7oa + Operation Type: Create DB System + Percent Complete: 100 + Time 
Accepted: 2025-01-09 18:10:41.171 +0000 UTC + Time Finished: 2025-01-09 19:31:17.126 +0000 UTC + Time Started: 2025-01-09 18:10:49.668 +0000 UTC +Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.md b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.md index f6bc8185..0d75297b 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.md +++ b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.md @@ -1,24 +1,24 @@ -# Deploy a DBCS DB System using OCI DBCS Service with minimal parameters +# Deploy a DB System using OCI Oracle Base Database System (OBDS) with minimal parameters -In this use case, an OCI DBCS system is deployed using Oracle DB Operator DBCS controller using minimal required parameters in the .yaml file being used during the deployment. +In this use case, an OCI Oracle Base Database System (OBDS) system is deployed using Oracle DB Operator OBDS controller using minimal required parameters in the .yaml file being used during the deployment. **NOTE** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. 
-This example uses `dbcs_service_with_minimal_parameters.yaml` to deploy a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: +This example uses `dbcs_service_with_minimal_parameters.yaml` to deploy a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: - OCI Configmap as `oci-cred` - OCI Secret as `oci-privatekey` -- Availability Domain for the DBCS VMDB as `OLou:EU-MILAN-1-AD-1` -- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaaks5baeqlvv4kyj2jiwnrbxgzm3gsumcfy4c6ntj2ro5i3a5gzhhq` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `cid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` - Database Admin Credential as `admin-password` -- Database Name as `dbsystem0130` +- Database Name as `dbsystem1234` - Oracle Database Software Image Version as `19c` - Database Workload Type as Transaction Processing i.e. `OLTP` -- Database Hostname Prefix as `host1205` +- Database Hostname Prefix as `host1234` - Oracle VMDB Shape as `VM.Standard2.1` -- SSH Public key for the DBCS system being deployed as `oci-publickey` +- SSH Public key for the OBDS system being deployed as `oci-publickey` - domain `vcndns.oraclevcn.com` -- OCID of the Subnet as `ocid1.subnet.oc1.eu-milan-1.aaaaaaaaeiy3tvcsnyg6upfp3ydtu7jmfnmoyifq2ax6y45b5qpdbpide5xa` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` **NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). @@ -36,9 +36,9 @@ dbcssystem.database.oracle.com/dbcssystem-create created NOTE: Check the DB Operator Pod name in your environment. 
``` -[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system ``` ## Sample Output -[Here](./dbcs_service_with_minimal_parameters_sample_output.log) is the sample output for a DBCS System deployed in OCI using Oracle DB Operator DBCS Controller with minimal parameters. +[Here](./dbcs_service_with_minimal_parameters_sample_output.log) is the sample output for a OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with minimal parameters. diff --git a/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.yaml b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.yaml index dc02cfa3..66e1c229 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.yaml +++ b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-create @@ -6,20 +6,18 @@ spec: ociConfigMap: "oci-cred" ociSecret: "oci-privatekey" dbSystem: - availabilityDomain: "OLou:EU-MILAN-1-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaaks5baeqlvv4kyj2jiwnrbxgzm3gsumcfy4c6ntj2ro5i3a5gzhhq" - dbAdminPaswordSecret: "admin-password" + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" - dbName: "testdb" - displayName: "dbsystem0130" + dbName: "cdb1" + displayName: "dbsystem1234" licenseModel: "BRING_YOUR_OWN_LICENSE" dbVersion: "19c" dbWorkload: "OLTP" - hostName: "host1205" + hostName: "host1234" shape: "VM.Standard2.1" - domain: "vcndns.oraclevcn.com" - sshPublicKeys: + domain: 
"subdda0b5eaa.cluster1.oraclevcn.com" + sshPublicKeys: - "oci-publickey" - subnetId: "ocid1.subnet.oc1.eu-milan-1.aaaaaaaaeiy3tvcsnyg6upfp3ydtu7jmfnmoyifq2ax6y45b5qpdbpide5xa" - - + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" diff --git a/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters_sample_output.log index 133f109c..80860c51 100644 --- a/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters_sample_output.log +++ b/docs/dbcs/provisioning/dbcs_service_with_minimal_parameters_sample_output.log @@ -1,135 +1,82 @@ -/usr/bin/kubectl describe dbcssystems.database.oracle.com dbcssystem-create +kubectl describe dbcssystems/dbcssystem-create Name: dbcssystem-create Namespace: default Labels: -Annotations: kubectl.kubernetes.io/last-applied-configuration: - {"apiVersion":"database.oracle.com/v1alpha1","kind":"DbcsSystem","metadata":{"annotations":{},"name":"dbcssystem-create","namespace":"defa... -API Version: database.oracle.com/v1alpha1 +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... +API Version: database.oracle.com/v4 Kind: DbcsSystem Metadata: - Creation Timestamp: 2023-12-12T12:59:58Z - Generation: 1 - Managed Fields: - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - Fields V 1: - F : Metadata: - F : Annotations: - .: - F : Kubectl . Kubernetes . 
Io / Last - Applied - Configuration: - F : Spec: - .: - F : Db System: - .: - F : Availability Domain: - F : Compartment Id: - F : Db Admin Pasword Secret: - F : Db Edition: - F : Db Name: - F : Db Version: - F : Db Workload: - F : Display Name: - F : Domain: - F : Host Name: - F : License Model: - F : Shape: - F : Ssh Public Keys: - F : Subnet Id: - F : Oci Config Map: - F : Oci Secret: - Manager: kubectl - Operation: Update - Time: 2023-12-12T12:59:58Z - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - Fields V 1: - F : Status: - .: - F : Availability Domain: - F : Cpu Core Count: - F : Data Storage Percentage: - F : Data Storage Size In G Bs: - F : Db Edition: - F : Db Info: - F : Display Name: - F : Id: - F : License Model: - F : Network: - .: - F : Client Subnet: - F : Domain Name: - F : Host Name: - F : Listener Port: - F : Vcn Name: - F : Node Count: - F : Reco Storage Size In GB: - F : Shape: - F : State: - F : Storage Management: - F : Subnet Id: - F : Time Zone: - F : Work Requests: - Manager: manager - Operation: Update - Subresource: status - Time: 2023-12-12T14:12:36Z - Resource Version: 1571919 - UID: e11353f3-1334-4ca8-af31-4b638442f429 + Creation Timestamp: 2024-12-10T05:19:46Z + Generation: 2 + Resource Version: 117717259 + UID: 3ff13686-50ec-41e3-81c8-77bb6b5a8afa Spec: Db System: - Availability Domain: OLou:EU-MILAN-1-AD-1 - Compartment Id: ocid1.compartment.oc1..aaaaaaaaks5baeqlvv4kyj2jiwnrbxgzm3gsumcfy4c6ntj2ro5i3a5gzhhq + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a Db Admin Pasword Secret: admin-password - Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE - Db Name: testdb - Db Version: 19c - Db Workload: OLTP - Display Name: dbsystem0130 - Domain: vcndns.oraclevcn.com - Host Name: host1205 - License Model: BRING_YOUR_OWN_LICENSE - Shape: VM.Standard2.1 + Db Backup Config: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db 
Name: cdb1 + Db Version: 19c + Db Workload: OLTP + Display Name: dbsystem1234 + Domain: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Kms Config: + License Model: BRING_YOUR_OWN_LICENSE + Shape: VM.Standard2.1 Ssh Public Keys: oci-publickey - Subnet Id: ocid1.subnet.oc1.eu-milan-1.aaaaaaaaeiy3tvcsnyg6upfp3ydtu7jmfnmoyifq2ax6y45b5qpdbpide5xa - Oci Config Map: oci-cred + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Config: + Oci Config Map: oci-cred-mumbai Oci Secret: oci-privatekey Status: - Availability Domain: OLou:EU-MILAN-1-AD-1 + Availability Domain: OLou:AP-MUMBAI-1-AD-1 Cpu Core Count: 1 Data Storage Percentage: 80 Data Storage Size In G Bs: 256 - Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE Db Info: - Db Home Id: ocid1.dbhome.oc1.eu-milan-1.anwgsljrx2vveliazumnbyvudq3rwkbc4brtamgqzyrjuwfbtx5k7hlqwx2a - Db Name: testdb - Db Unique Name: testdb_fg4_lin + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiaoqqlaxhx4urdwmefw4il5efzekneuru4bpfv57i7iy6a + Db Name: cdb1 + Db Unique Name: cdb1_tkf_bom Db Workload: OLTP - Id: ocid1.database.oc1.eu-milan-1.anwgsljrabf7htyasoe7b7mtfecc7tdfkp6w5knvvufxmk3phztxfktf6naq - Display Name: dbsystem0130 - Id: ocid1.dbsystem.oc1.eu-milan-1.anwgsljrabf7htyat3fsgcftfilt45bgbrfgawroa2oasamavsluwqyr5aya - License Model: BRING_YOUR_OWN_LICENSE + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyalxin4xpiggjh4nxlta6o6iq56hjrlh4of2cq6c4qgrqa + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE Network: - Client Subnet: vcnsbn - Domain Name: vcndns.oraclevcn.com - Host Name: host1205 - Listener Port: 1521 - Vcn Name: 
vcnnet - Node Count: 1 - Reco Storage Size In GB: 256 - Shape: VM.Standard2.1 - State: PROVISIONING - Storage Management: ASM - Subnet Id: ocid1.subnet.oc1.eu-milan-1.aaaaaaaaeiy3tvcsnyg6upfp3ydtu7jmfnmoyifq2ax6y45b5qpdbpide5xa - Time Zone: UTC + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Listener Port: 1521 + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Pdb Name: cdb1_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakgj4wuabus6z5kmalvob6r6b7vivkbsmmh7bjprzbuwa + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC Work Requests: - Operation Id: ocid1.coreservicesworkrequest.oc1.eu-milan-1.abwgsljrkv6jwqtepnxyhnxgtzolw74bqfh5oqlwskq72dqgjpfs5rxu66wa + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrk2efvqjda2t7k5iaerahw7wcyz5dq2zev2k55gmq2gvsjkui7hxq Operation Type: Create DB System - Percent Complete: 0 - Time Accepted: 2023-12-12 13:00:03.156 +0000 UTC - Operation Id: ocid1.coreservicesworkrequest.oc1.eu-milan-1.abwgsljrxudjz2qivun6ypupqhytam4axv4wago7nuauceqyapbceysjukfq - Operation Type: Create DB System - Percent Complete: 0 - Time Accepted: 2023-12-12 14:12:36.047 +0000 UTC + Percent Complete: 100 + Time Accepted: 2024-12-10 05:19:52.499 +0000 UTC + Time Finished: 2024-12-10 07:59:19.083 +0000 UTC + Time Started: 2024-12-10 05:19:55.747 +0000 UTC Events: - diff --git a/docs/dbcs/provisioning/dbcs_service_with_pdb.yaml b/docs/dbcs/provisioning/dbcs_service_with_pdb.yaml new file mode 100644 index 00000000..7da5f729 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_pdb.yaml @@ -0,0 +1,38 @@ 
+apiVersion: database.oracle.com/v4 +kind: DbcsSystem +metadata: + name: dbcssystem-create-with-pdb + namespace: default +spec: + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:US-ASHBURN-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + dbEdition: "ENTERPRISE_EDITION_HIGH_PERFORMANCE" + dbName: "dbsys" + displayName: "dbsystem24" + licenseModel: "BRING_YOUR_OWN_LICENSE" + dbVersion: "21c" + dbWorkload: "OLTP" + hostName: "host24" + shape: "VM.Standard3.Flex" + cpuCoreCount: 1 + domain: "subd215df3e6.k8stest.oraclevcn.com" + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.iad.aaaaaaaa3lmmxwsykn2jc2vphzpq6eoyoqtte3dpwg6s5fzfkti22ibol2ua" + pdbConfigs: + - pdbName: "pdb_sauahuja_11" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "Finance" + - pdbName: "pdb_sauahuja_12" + tdeWalletPassword: "tde-password" + pdbAdminPassword: "pdb-password" + shouldPdbAdminAccountBeLocked: false + freeformTags: + Department: "HR" \ No newline at end of file diff --git a/docs/dbcs/provisioning/dbcs_service_with_pdb_sample_output.log b/docs/dbcs/provisioning/dbcs_service_with_pdb_sample_output.log new file mode 100644 index 00000000..15946a43 --- /dev/null +++ b/docs/dbcs/provisioning/dbcs_service_with_pdb_sample_output.log @@ -0,0 +1,137 @@ +2024-08-14T13:59:34Z INFO DbcsSystem system provisioned succesfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T13:59:35Z INFO DBInst after assignment {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", 
"controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "dbcsInst:->": {"apiVersion": "database.oracle.com/v1alpha1", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-create-with-pdb"}} +2024-08-14T13:59:36Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htyarsik3zmfezgl5tvvnmtf7wqm2n3cnvhyx5oo3nk5f6lq"]} +2024-08-14T13:59:36Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v1alpha1", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-create-with-pdb"}, "databaseIds:->": "ocid1.database.oc1.iad.anuwcljsabf7htyarsik3zmfezgl5tvvnmtf7wqm2n3cnvhyx5oo3nk5f6lq", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-14T13:59:36Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": 
"4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_11"} +2024-08-14T13:59:36Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T13:59:36Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T13:59:36Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_11"} +2024-08-14T13:59:37Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_11", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq"} +2024-08-14T13:59:37Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": 
"4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:00:07Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:00:38Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:01:08Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:01:38Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", 
"PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:02:08Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "PROVISIONING"} +2024-08-14T14:02:38Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq", "Status": "AVAILABLE"} +2024-08-14T14:02:38Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_11", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4ydcdycxirop2jjf4htjcq6mnzavf6yyqfsuo74miviq"} +2024-08-14T14:02:39Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "DatabaseId": 
["ocid1.database.oc1.iad.anuwcljsabf7htyarsik3zmfezgl5tvvnmtf7wqm2n3cnvhyx5oo3nk5f6lq"]} +2024-08-14T14:02:39Z INFO Calling createPluggableDatabase {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "ctx:->": "context.Background.WithCancel.WithValue(type logr.contextKey, val ).WithValue(type controller.reconcileIDKey, val )", "dbcsInst:->": {"apiVersion": "database.oracle.com/v1alpha1", "kind": "DbcsSystem", "namespace": "default", "name": "dbcssystem-create-with-pdb"}, "databaseIds:->": "ocid1.database.oc1.iad.anuwcljsabf7htyarsik3zmfezgl5tvvnmtf7wqm2n3cnvhyx5oo3nk5f6lq", "compartmentId:->": "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a"} +2024-08-14T14:02:39Z INFO Checking if the pluggable database exists {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_12"} +2024-08-14T14:02:39Z INFO TDE wallet password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T14:02:39Z INFO PDB admin password retrieved successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", 
"reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267"} +2024-08-14T14:02:39Z INFO Creating pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_12"} +2024-08-14T14:02:40Z INFO Pluggable database creation initiated {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_12", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q"} +2024-08-14T14:02:40Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:03:11Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:03:41Z INFO Checking pluggable database status {"controller": "dbcssystem", 
"controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:04:11Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:04:41Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:05:11Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:05:42Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", 
"controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "PROVISIONING"} +2024-08-14T14:06:12Z INFO Checking pluggable database status {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q", "Status": "AVAILABLE"} +2024-08-14T14:06:12Z INFO Pluggable database successfully created {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-create-with-pdb","namespace":"default"}, "namespace": "default", "name": "dbcssystem-create-with-pdb", "reconcileID": "4a257c84-d61d-4373-aec5-1bca6abf8267", "PDBName": "pdb_sauahuja_12", "PDBID": "ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyahbbg3hf56qhw55cou7465zmuukgv7hh46niu3dsoug3q"} + + +#kubectl describe dbcssystems.database.oracle.com +Name: dbcssystem-create-with-pdb +Namespace: default +Labels: +Annotations: lastSuccessfulSpec: + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... 
+API Version: database.oracle.com/v1alpha1 +Kind: DbcsSystem +Metadata: + Creation Timestamp: 2024-08-16T09:26:08Z + Generation: 1 + Resource Version: 68483815 + UID: 9dd15628-e47b-4d9c-8bc6-2388e51cba30 +Spec: + Db System: + Availability Domain: OLou:US-ASHBURN-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a + Cpu Core Count: 1 + Db Admin Pasword Secret: admin-password + Db Backup Config: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Name: dbsys + Db Version: 21c + Db Workload: OLTP + Display Name: dbsystem24 + Domain: subd215df3e6.k8stest.oraclevcn.com + Host Name: host24 + Kms Config: + License Model: BRING_YOUR_OWN_LICENSE + Shape: VM.Standard3.Flex + Ssh Public Keys: + oci-publickey + Subnet Id: ocid1.subnet.oc1.iad.aaaaaaaa3lmmxwsykn2jc2vphzpq6eoyoqtte3dpwg6s5fzfkti22ibol2ua + Kms Config: + Oci Config Map: oci-cred + Oci Secret: oci-privatekey + Pdb Configs: + Freeform Tags: + Department: Finance + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_11 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password + Freeform Tags: + Department: HR + Pdb Admin Password: pdb-password + Pdb Name: pdb_sauahuja_12 + Should Pdb Admin Account Be Locked: false + Tde Wallet Password: tde-password +Status: + Availability Domain: OLou:US-ASHBURN-AD-1 + Cpu Core Count: 1 + Data Storage Percentage: 80 + Data Storage Size In G Bs: 256 + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE + Db Info: + Db Home Id: ocid1.dbhome.oc1.iad.anuwcljsqlb5nxiaqfh3twuegmxxci5boocmowxd6kcczeq6e7jwqezfmbwq + Db Name: dbsys + Db Unique Name: dbsys_dss_iad + Db Workload: OLTP + Id: ocid1.database.oc1.iad.anuwcljsabf7htya5c2ttar7axxqq6qej3allfz23nvrtx6ilka4stdmrpga + Display Name: dbsystem24 + Id: ocid1.dbsystem.oc1.iad.anuwcljsabf7htya55wz5vfil7ul3pkzpubnymp6zrp3fhgomv3fcdr2vtiq + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE + Network: + Client Subnet: 
oke-nodesubnet-quick-k8s-test-ae2addeb0-regional + Domain Name: subd215df3e6.k8stest.oraclevcn.com + Host Name: host24 + Listener Port: 1521 + Scan Dns Name: host24-scan.subd215df3e6.k8stest.oraclevcn.com + Vcn Name: oke-vcn-quick-k8s-test-ae2addeb0 + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Freeform Tags: + Department: Finance + Pdb Name: pdb_sauahuja_11 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htya4arzakcgum6mv7h6cqmxhepyrjzfs77mxhqt4f3gylxq + Should Pdb Admin Account Be Locked: false + Pdb Config Status: + Freeform Tags: + Department: HR + Pdb Name: pdb_sauahuja_12 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.iad.anuwcljsabf7htyaiq6cyhxwqr4ad3pfn7g6e6nd2myiibj54tbg7vc27hfa + Should Pdb Admin Account Be Locked: false + Reco Storage Size In GB: 256 + Shape: VM.Standard3.Flex + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.iad.aaaaaaaa3lmmxwsykn2jc2vphzpq6eoyoqtte3dpwg6s5fzfkti22ibol2ua + Time Zone: UTC + Work Requests: + Operation Id: ocid1.coreservicesworkrequest.oc1.iad.abuwcljscvomyvuthyc5bnmgi4myo565mbaghtjbhscgvabiy4tyzahjtiba + Operation Type: Create DB System + Percent Complete: 100 + Time Accepted: 2024-08-14 18:28:36.996 +0000 UTC + Time Finished: 2024-08-14 19:44:28.607 +0000 UTC + Time Started: 2024-08-14 18:28:45.134 +0000 UTC +Events: diff --git a/docs/dbcs/provisioning/delete_pdb.md b/docs/dbcs/provisioning/delete_pdb.md new file mode 100644 index 00000000..84d676bc --- /dev/null +++ b/docs/dbcs/provisioning/delete_pdb.md @@ -0,0 +1,50 @@ +# Delete PDB of an existing DBCS System + +In this use case, an existing OCI DBCS system deployed earlier is going to have PDB/PDBs deleted. It's a 2-step operation. + +In order to delete PDBs from an existing DBCS system, the steps will be: + +1. Bind the existing DBCS System to DBCS Controller. +2. Apply the change to delete PDBs. 
+ +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. + +As step 1, first bind the existing DBCS System to DBCS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). After successful binding, it will show as below- +```bash +kubectl get dbcssystems +NAME AGE +dbcssystem-existing 3m33s +``` + +This example uses `deletepdb_in_existing_dbcs_system_list.yaml` to delete PDBs of a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htyag4akvoakzw4qk7cae55qyp7hlffbouozvyl5ngoputza` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- PDB Name to be deleted e.g `pdb_sauahuja_11` and `pdb_sauahuja_12` +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [deletepdb_in_existing_dbcs_system_list.yaml](./deletepdb_in_existing_dbcs_system_list.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server DBCS]# kubectl apply -f deletepdb_in_existing_dbcs_system_list.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB deletion of PDBs. + +NOTE: Check the DB Operator Pod name in your environment. + +```bash +[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +3. 
Remove DBCS Systems resource- +```bash +kubectl delete -f deletepdb_in_existing_dbcs_system_list.yaml +``` + +## Sample Output + +[Here](./deletepdb_in_existing_dbcs_system_list_sample_output.log) is the sample output for deletion of PDBs from an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller. \ No newline at end of file diff --git a/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list.yaml b/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list.yaml new file mode 100644 index 00000000..fed3ec6c --- /dev/null +++ b/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list.yaml @@ -0,0 +1,13 @@ +kind: DbcsSystem +metadata: + name: dbcssystem-existing + namespace: default +spec: + id: "ocid1.dbsystem.oc1.iad.anuwcljsabf7htyag4akvoakzw4qk7cae55qyp7hlffbouozvyl5ngoputza" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + pdbConfigs: + - pdbName: "pdb_sauahuja_11" + isDelete: true + - pdbName: "pdb_sauahuja_12" + isDelete: true \ No newline at end of file diff --git a/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list_sample_output.log b/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list_sample_output.log new file mode 100644 index 00000000..a4f75fa5 --- /dev/null +++ b/docs/dbcs/provisioning/deletepdb_in_existing_dbcs_system_list_sample_output.log @@ -0,0 +1,8 @@ +2024-07-01T12:34:44Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htyaxx3o46rynl5vyduxilwxeeafndy4cwqtkywkhcws435a"]} +2024-07-01T12:34:44Z INFO Deleting pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": 
{"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "PDBName": "pdb_sauahuja_11"} +2024-07-01T12:34:44Z INFO PluggableDatabaseId is not specified, getting pluggable databaseID {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808"} +2024-07-01T12:34:45Z INFO Successfully deleted pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "PDBName": "pdb_sauahuja_11"} +2024-07-01T12:34:46Z INFO Database details fetched successfully {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "DatabaseId": ["ocid1.database.oc1.iad.anuwcljsabf7htyaxx3o46rynl5vyduxilwxeeafndy4cwqtkywkhcws435a"]} +2024-07-01T12:34:46Z INFO Deleting pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "PDBName": "pdb_sauahuja_12"} +2024-07-01T12:34:46Z INFO PluggableDatabaseId is not specified, getting pluggable databaseID {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": 
{"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808"} +2024-07-01T12:34:47Z INFO Successfully deleted pluggable database {"controller": "dbcssystem", "controllerGroup": "database.oracle.com", "controllerKind": "DbcsSystem", "DbcsSystem": {"name":"dbcssystem-existing","namespace":"default"}, "namespace": "default", "name": "dbcssystem-existing", "reconcileID": "e9cdd6c8-a381-40c2-b621-5c97467f6808", "PDBName": "pdb_sauahuja_12"} \ No newline at end of file diff --git a/docs/dbcs/provisioning/migrate_to_kms.md b/docs/dbcs/provisioning/migrate_to_kms.md new file mode 100644 index 00000000..0c5ee10c --- /dev/null +++ b/docs/dbcs/provisioning/migrate_to_kms.md @@ -0,0 +1,49 @@ +# Create and update KMS vault to an existing OBDS System already deployed in OCI Oracle Base Database System (OBDS) + +In this use case, an existing OCI OBDS system deployed earlier having encryption with TDE Wallet Password, will be migrated to have KMS Vault created and update OBDS System in OCI. Its a 2 Step operation. + +In order to create KMS Vaults to an existing OBDS system, the steps will be: + +1. Bind the existing OBDS System (having encryption enabled with TDE Wallet password) to the OBDS Controller. +2. Apply the change to create KMS Vaults. + +**NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. It is also assumed that OBDS System you created earlier is using TDE Wallet password. + +As step 1, first bind the existing OBDS System to OBDS Controller following [documentation](./../provisioning/bind_to_existing_dbcs_system.md). 
After successful binding, it will show as below- +```bash +kubectl get dbcssystems +NAME AGE +dbcssystem-create 3m33s +``` +Before proceeding further, create the PDB Admin Password secret, which is going to be used as its name suggests. + +This example uses `dbcs_service_migrate_to_kms.yaml` to create KMS Vault to existing OBDS VMDB having encryption already enabled earlier with TDE Wallet using Oracle DB Operator OBDS Controller with: + +- OCID of existing VMDB as `ocid1.dbsystem.oc1.iad.anuwcljsabf7htyaoja4v2kx5rcfe5w2onndjfpqjhjoakxgwxo2sbgei5iq` +- OCI Configmap as `oci-cred` +- OCI Secret as `oci-privatekey` +- Existing `dbSystem` details (`compartmentId`,`dbAdminPasswordSecret`,`tdeWalletPasswordSecret`) used before to create OBDS system. +- kmsConfig - vaultName as `dbvault` as an example. +- kmsConfig - keyName as `dbkey` as an example. +- kmsConfig - compartmentId as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` as an example. +**NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). + +Use the file: [dbcs_service_migrate_to_kms.yaml](./dbcs_service_migrate_to_kms.yaml) for this use case as below: + +1. Deploy the .yaml file: +```sh +[root@docker-test-server OBDS]# kubectl apply -f dbcs_service_migrate_to_kms.yaml +dbcssystem.database.oracle.com/dbcssystem-existing configured +``` + +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB creation of KMS Vaults. + +NOTE: Check the DB Operator Pod name in your environment. + +``` +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +``` + +## Sample Output + +[Here](./dbcs_service_migrate_to_kms.log) is the sample output for creation of KMS Vaults on an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/scale_down_dbcs_system_shape.md b/docs/dbcs/provisioning/scale_down_dbcs_system_shape.md index abe98eea..1f03ff9f 100644 --- a/docs/dbcs/provisioning/scale_down_dbcs_system_shape.md +++ b/docs/dbcs/provisioning/scale_down_dbcs_system_shape.md @@ -1,26 +1,26 @@ -# Scale Down the shape of an existing DBCS System +# Scale Down the shape of an existing OBDS System -In this use case, an existing OCI DBCS system deployed earlier is scaled down for its shape using Oracle DB Operator DBCS controller. Its a 2 Step operation. +In this use case, an existing OCI OBDS system deployed earlier is scaled down for its shape using Oracle DB Operator OBDS controller. Its a 2 Step operation. -In order to scale down an existing DBCS system, the steps will be: +In order to scale down an existing OBDS system, the steps will be: -1. Bind the existing DBCS System to DBCS Controller. +1. Bind the existing OBDS System to OBDS Controller. 2. Apply the change to scale down its shape. **NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. 
-This example uses `scale_down_dbcs_system_shape.yaml` to scale down a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: +This example uses `scale_down_dbcs_system_shape.yaml` to scale down a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: -- OCID of existing VMDB as `ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa` +- OCID of existing VMDB as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` - OCI Configmap as `oci-cred` - OCI Secret as `oci-privatekey` -- Availability Domain for the DBCS VMDB as `OLou:PHX-AD-1` -- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` - Database Admin Credential as `admin-password` -- Database Hostname Prefix as `host0130` +- Database Hostname Prefix as `host1234` - Oracle VMDB target Shape as `VM.Standard2.1` -- SSH Public key for the DBCS system being deployed as `oci-publickey` -- OCID of the Subnet as `ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` **NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). @@ -28,18 +28,18 @@ Use the file: [scale_down_dbcs_system_shape.yaml](./scale_down_dbcs_system_shape 1. Deploy the .yaml file: ```sh -[root@docker-test-server DBCS]# kubectl apply -f scale_down_dbcs_system_shape.yaml +[root@docker-test-server OBDS]# kubectl apply -f scale_down_dbcs_system_shape.yaml dbcssystem.database.oracle.com/dbcssystem-existing configured ``` -2. 
Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB Scale down. +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB Scale down. NOTE: Check the DB Operator Pod name in your environment. ``` -[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system ``` ## Sample Output -[Here](./scale_down_dbcs_system_shape_sample_output.log) is the sample output for scaling down the shape of an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller. +[Here](./scale_down_dbcs_system_shape_sample_output.log) is the sample output for scaling down the shape of an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/scale_down_dbcs_system_shape.yaml b/docs/dbcs/provisioning/scale_down_dbcs_system_shape.yaml index 5e2cfb3f..f4394ddc 100644 --- a/docs/dbcs/provisioning/scale_down_dbcs_system_shape.yaml +++ b/docs/dbcs/provisioning/scale_down_dbcs_system_shape.yaml @@ -1,17 +1,18 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-existing spec: - id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" ociConfigMap: "oci-cred" ociSecret: "oci-privatekey" dbSystem: - availabilityDomain: "OLou:PHX-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" - dbAdminPaswordSecret: "admin-password" - hostName: "host0130" + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + hostName: "host1234" shape: "VM.Standard2.1" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" sshPublicKeys: - - "oci-publickey" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" diff --git a/docs/dbcs/provisioning/scale_down_dbcs_system_shape_sample_output.log b/docs/dbcs/provisioning/scale_down_dbcs_system_shape_sample_output.log index acc45208..32e0a318 100644 --- a/docs/dbcs/provisioning/scale_down_dbcs_system_shape_sample_output.log +++ b/docs/dbcs/provisioning/scale_down_dbcs_system_shape_sample_output.log @@ -1,176 +1,6 @@ -[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing -Name: dbcssystem-existing -Namespace: default -Labels: -Annotations: lastSuccessfulSpec: - 
{"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... -API Version: database.oracle.com/v1alpha1 -Kind: DbcsSystem -Metadata: - Creation Timestamp: 2022-03-08T23:27:48Z - Generation: 2 - Managed Fields: - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:dbSystem: - .: - f:availabilityDomain: - f:compartmentId: - f:dbAdminPaswordSecret: - f:hostName: - f:shape: - f:sshPublicKeys: - f:subnetId: - f:id: - f:ociConfigMap: - f:ociSecret: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-03-08T23:32:50Z - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:lastSuccessfulSpec: - f:spec: - f:dbSystem: - f:cpuCoreCount: - f:dbBackupConfig: - f:dbEdition: - f:dbName: - f:dbUniqueName: - f:dbVersion: - f:diskRedundancy: - f:displayName: - f:faultDomains: - f:nodeCount: - f:status: - .: - f:availabilityDomain: - f:cpuCoreCount: - f:dataStoragePercentage: - f:dataStorageSizeInGBs: - f:dbEdition: - f:dbInfo: - f:displayName: - f:id: - f:licenseModel: - f:network: - .: - f:clientSubnet: - f:domainName: - f:hostName: - f:listenerPort: - f:scanDnsName: - f:vcnName: - f:nodeCount: - f:recoStorageSizeInGB: - f:shape: - f:state: - f:storageManagement: - f:subnetId: - f:timeZone: - f:workRequests: - Manager: manager - Operation: Update - Time: 2022-03-08T23:32:55Z - Resource Version: 55197836 - UID: 96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 -Spec: - Db System: - Availability Domain: OLou:PHX-AD-1 - Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya - Db Admin Pasword Secret: admin-password - Host Name: host0130 - Shape: VM.Standard2.2 - Ssh Public Keys: - oci-publickey - Subnet Id: 
ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - Oci Config Map: oci-cred - Oci Secret: oci-privatekey -Status: - Availability Domain: OLou:PHX-AD-1 - Cpu Core Count: 2 - Data Storage Percentage: 80 - Data Storage Size In G Bs: 256 - Db Edition: ENTERPRISE_EDITION - Db Info: - Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq - Db Name: db0130 - Db Unique Name: db0130_phx1zn - Db Workload: OLTP - Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra - Display Name: dbsystem20220308221302 - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - License Model: LICENSE_INCLUDED - Network: - Client Subnet: k8test-pubvcn - Domain Name: k8testpubvcn.k8test.oraclevcn.com - Host Name: host0130 - Listener Port: 1521 - Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com - Vcn Name: k8test - Node Count: 1 - Reco Storage Size In GB: 256 - Shape: VM.Standard2.2 - State: AVAILABLE - Storage Management: ASM - Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a - Time Zone: UTC - Work Requests: - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq - Operation Type: Create DB System - Percent Complete: 100 - Time Accepted: 2022-03-08 22:13:02.999 +0000 UTC - Time Finished: 2022-03-08 23:11:50.46 +0000 UTC - Time Started: 2022-03-08 22:13:16.995 +0000 UTC - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrc3fx6kwq4yxerk3ngztdbbngm7w4dnlddcdhxqxjn6e4kcyux5ca - Operation Type: Update Shape - Percent Complete: 100 - Time Accepted: 2022-03-08 23:33:42.807 +0000 UTC - Time Finished: 2022-03-08 23:46:21.126 +0000 UTC - Time Started: 2022-03-08 23:33:52.109 +0000 UTC -Events: -[root@docker-test-server test]# - - - - 
-[root@docker-test-server test]# cat scale_down_dbcs_system_shape.yaml -apiVersion: database.oracle.com/v1alpha1 -kind: DbcsSystem -metadata: - name: dbcssystem-existing -spec: - id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" - ociConfigMap: "oci-cred" - ociSecret: "oci-privatekey" - dbSystem: - availabilityDomain: "OLou:PHX-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" - dbAdminPaswordSecret: "admin-password" - hostName: "host0130" - shape: "VM.Standard2.1" - sshPublicKeys: - - "oci-publickey" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" -[root@docker-test-server test]# -[root@docker-test-server test]# [root@docker-test-server test]# kubectl apply -f scale_down_dbcs_system_shape.yaml dbcssystem.database.oracle.com/dbcssystem-existing configured - - - [root@docker-test-server test]# kubectl get ns kubectl get allNAME STATUS AGE @@ -221,158 +51,103 @@ replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 2022-03-09T00:38:18.344Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} - - - - - - - [root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing Name: dbcssystem-existing Namespace: default Labels: Annotations: lastSuccessfulSpec: - {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... -API Version: database.oracle.com/v1alpha1 + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... 
+API Version: database.oracle.com/v4 Kind: DbcsSystem Metadata: - Creation Timestamp: 2022-03-08T23:27:48Z - Generation: 3 - Managed Fields: - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:dbSystem: - .: - f:availabilityDomain: - f:compartmentId: - f:dbAdminPaswordSecret: - f:hostName: - f:shape: - f:sshPublicKeys: - f:subnetId: - f:id: - f:ociConfigMap: - f:ociSecret: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-03-08T23:32:50Z - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:lastSuccessfulSpec: - f:spec: - f:dbSystem: - f:cpuCoreCount: - f:dbBackupConfig: - f:dbEdition: - f:dbName: - f:dbUniqueName: - f:dbVersion: - f:diskRedundancy: - f:displayName: - f:faultDomains: - f:nodeCount: - f:status: - .: - f:availabilityDomain: - f:cpuCoreCount: - f:dataStoragePercentage: - f:dataStorageSizeInGBs: - f:dbEdition: - f:dbInfo: - f:displayName: - f:id: - f:licenseModel: - f:network: - .: - f:clientSubnet: - f:domainName: - f:hostName: - f:listenerPort: - f:scanDnsName: - f:vcnName: - f:nodeCount: - f:recoStorageSizeInGB: - f:shape: - f:state: - f:storageManagement: - f:subnetId: - f:timeZone: - f:workRequests: - Manager: manager - Operation: Update - Time: 2022-03-08T23:32:55Z - Resource Version: 55214174 - UID: 96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 + Creation Timestamp: 2024-12-10T10:54:17Z + Generation: 2 + Resource Version: 117775637 + UID: c9da1245-3582-4926-b311-c24d75e75003 Spec: Db System: - Availability Domain: OLou:PHX-AD-1 - Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a Db Admin Pasword Secret: admin-password - Host Name: host0130 - 
Shape: VM.Standard2.1 + Db Backup Config: + Domain: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Kms Config: + Shape: VM.Standard2.1 Ssh Public Keys: oci-publickey - Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - Oci Config Map: oci-cred + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Config: + Oci Config Map: oci-cred-mumbai Oci Secret: oci-privatekey Status: - Availability Domain: OLou:PHX-AD-1 + Availability Domain: OLou:AP-MUMBAI-1-AD-1 Cpu Core Count: 1 Data Storage Percentage: 80 Data Storage Size In G Bs: 256 - Db Edition: ENTERPRISE_EDITION + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE Db Info: - Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq - Db Name: db0130 - Db Unique Name: db0130_phx1zn + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiaoqqlaxhx4urdwmefw4il5efzekneuru4bpfv57i7iy6a + Db Name: cdb1 + Db Unique Name: cdb1_tkf_bom Db Workload: OLTP - Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra - Display Name: dbsystem20220308221302 - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - License Model: LICENSE_INCLUDED + Id: ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyalxin4xpiggjh4nxlta6o6iq56hjrlh4of2cq6c4qgrqa + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE Network: - Client Subnet: k8test-pubvcn - Domain Name: k8testpubvcn.k8test.oraclevcn.com - Host Name: host0130 - Listener Port: 1521 - Scan Dns Name: 
host0130-scan.k8testpubvcn.k8test.oraclevcn.com - Vcn Name: k8test - Node Count: 1 - Reco Storage Size In GB: 256 - Shape: VM.Standard2.1 - State: AVAILABLE - Storage Management: ASM - Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a - Time Zone: UTC + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Listener Port: 1521 + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Pdb Name: cdb1_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakgj4wuabus6z5kmalvob6r6b7vivkbsmmh7bjprzbuwa + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC Work Requests: - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrtpnjzjidageolva6ytlzjfb2lqhbbrivm4lsb67xyjzyyke6bt4a + Operation Type: Update Shape + Percent Complete: 100 + Time Accepted: 2024-12-10 08:57:53.547 +0000 UTC + Time Finished: 2024-12-10 09:14:04.572 +0000 UTC + Time Started: 2024-12-10 08:57:57.588 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxg7gov22vlcbqbnxrkl7t7xkcfya6w6gvck344jdf5vtqgw5wzgq + Operation Type: Update DB System + Percent Complete: 100 + Time Accepted: 2024-12-10 08:57:43.701 +0000 UTC + Time Finished: 2024-12-10 09:14:22.705 +0000 UTC + Time Started: 2024-12-10 08:57:53.873 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrk2efvqjda2t7k5iaerahw7wcyz5dq2zev2k55gmq2gvsjkui7hxq Operation Type: Create DB System Percent Complete: 100 - Time 
Accepted: 2022-03-08 22:13:02.999 +0000 UTC - Time Finished: 2022-03-08 23:11:50.46 +0000 UTC - Time Started: 2022-03-08 22:13:16.995 +0000 UTC - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrc3fx6kwq4yxerk3ngztdbbngm7w4dnlddcdhxqxjn6e4kcyux5ca - Operation Type: Update Shape + Time Accepted: 2024-12-10 05:19:52.499 +0000 UTC + Time Finished: 2024-12-10 07:59:19.083 +0000 UTC + Time Started: 2024-12-10 05:19:55.747 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr4qmf6rdtcbrc5p2q7bev3igugtpgfbwc2laht22yyjzr2srrg7vq + Operation Type: Update DB System Percent Complete: 100 - Time Accepted: 2022-03-08 23:33:42.807 +0000 UTC - Time Finished: 2022-03-08 23:46:21.126 +0000 UTC - Time Started: 2022-03-08 23:33:52.109 +0000 UTC - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljr5sveun3f6k3zuz23py7mm7jncmpq5vwyajbo5ezhc765347defwq + Time Accepted: 2024-12-10 10:57:27.313 +0000 UTC + Time Finished: 2024-12-10 11:15:50.597 +0000 UTC + Time Started: 2024-12-10 10:57:45.242 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr2vehqv3vgrxr5mrmd6hoqxg2zr6m5eaunv3ip6bcrubcpvhudmia Operation Type: Update Shape Percent Complete: 100 - Time Accepted: 2022-03-09 00:25:03.644 +0000 UTC - Time Finished: 2022-03-09 00:38:59.526 +0000 UTC - Time Started: 2022-03-09 00:25:15.578 +0000 UTC + Time Accepted: 2024-12-10 10:57:44.95 +0000 UTC + Time Finished: 2024-12-10 11:15:40.364 +0000 UTC + Time Started: 2024-12-10 10:57:54.082 +0000 UTC Events: \ No newline at end of file diff --git a/docs/dbcs/provisioning/scale_up_dbcs_system_shape.md b/docs/dbcs/provisioning/scale_up_dbcs_system_shape.md index 8efccb5f..924a8517 100644 --- a/docs/dbcs/provisioning/scale_up_dbcs_system_shape.md +++ b/docs/dbcs/provisioning/scale_up_dbcs_system_shape.md @@ -1,26 +1,26 @@ -# Scale UP the shape of an existing DBCS System +# Scale UP the shape of an existing OBDS System -In this use case, an existing OCI DBCS system deployed 
earlier is scaled up for its shape using Oracle DB Operator DBCS controller. Its a 2 Step operation. +In this use case, an existing OCI OBDS system deployed earlier is scaled up for its shape using Oracle DB Operator OBDS controller. It's a 2-step operation. -In order to scale up an existing DBCS system, the steps will be: +In order to scale up an existing OBDS system, the steps will be: -1. Bind the existing DBCS System to DBCS Controller. +1. Bind the existing OBDS System to OBDS Controller. 2. Apply the change to scale up its shape. **NOTE:** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. -This example uses `scale_up_dbcs_system_shape.yaml` to scale up a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: +This example uses `scale_up_dbcs_system_shape.yaml` to scale up a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: -- OCID of existing VMDB as `ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa` +- OCID of existing VMDB as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` - OCI Configmap as `oci-cred` - OCI Secret as `oci-privatekey` -- Availability Domain for the DBCS VMDB as `OLou:PHX-AD-1` -- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` - Database Admin Credential as `admin-password` -- Database Hostname Prefix as `host0130` -- Oracle VMDB Shape as `VM.Standard2.2` -- SSH Public key for the DBCS system being deployed as `oci-publickey` -- OCID of the Subnet as 
`ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a` +- Database Hostname Prefix as `host1234` +- Oracle VMDB target Shape as `VM.Standard2.2` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` **NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). @@ -28,18 +28,18 @@ Use the file: [scale_up_dbcs_system_shape.yaml](./scale_up_dbcs_system_shape.yam 1. Deploy the .yaml file: ```sh -[root@docker-test-server DBCS]# kubectl apply -f scale_up_dbcs_system_shape.yaml +[root@docker-test-server OBDS]# kubectl apply -f scale_up_dbcs_system_shape.yaml dbcssystem.database.oracle.com/dbcssystem-existing configured ``` -2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB Scale up. +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB Scale up. NOTE: Check the DB Operator Pod name in your environment. ``` -[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system ``` ## Sample Output -[Here](./scale_up_dbcs_system_shape_sample_output.log) is the sample output for scaling up the shape of an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller. +[Here](./scale_up_dbcs_system_shape_sample_output.log) is the sample output for scaling up the shape of an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/scale_up_dbcs_system_shape.yaml b/docs/dbcs/provisioning/scale_up_dbcs_system_shape.yaml index d1c2b95d..0be84c53 100644 --- a/docs/dbcs/provisioning/scale_up_dbcs_system_shape.yaml +++ b/docs/dbcs/provisioning/scale_up_dbcs_system_shape.yaml @@ -1,17 +1,18 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-existing spec: - id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" ociConfigMap: "oci-cred" ociSecret: "oci-privatekey" dbSystem: - availabilityDomain: "OLou:PHX-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" - dbAdminPaswordSecret: "admin-password" - hostName: "host0130" + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + hostName: "host1234" shape: "VM.Standard2.2" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" sshPublicKeys: - - "oci-publickey" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" diff --git a/docs/dbcs/provisioning/scale_up_storage.md b/docs/dbcs/provisioning/scale_up_storage.md index 64514025..ff16cbf9 100644 --- a/docs/dbcs/provisioning/scale_up_storage.md +++ b/docs/dbcs/provisioning/scale_up_storage.md @@ -1,45 +1,45 @@ -# Scale UP the storage of an existing DBCS System +# Scale UP the storage of an existing OBDS System -In this use case, an existing OCI DBCS system deployed earlier is scaled up for its storage using Oracle DB Operator DBCS controller. Its a 2 Step operation. 
+In this use case, an existing OCI OBDS system deployed earlier is scaled up for its storage using Oracle DB Operator OBDS controller. It's a 2-step operation. -In order to scale up storage of an existing DBCS system, the steps will be: +In order to scale up storage of an existing OBDS system, the steps will be: -1. Bind the existing DBCS System to DBCS Controller. +1. Bind the existing OBDS System to OBDS Controller. 2. Apply the change to scale up its storage. **NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. -This example uses `scale_up_storage.yaml` to scale up storage of an existing Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: +This example uses `scale_up_storage.yaml` to scale up storage of an existing Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: -- OCID of existing VMDB as `ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa` +- OCID of existing VMDB as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` - OCI Configmap as `oci-cred` - OCI Secret as `oci-privatekey` -- Availability Domain for the DBCS VMDB as `OLou:PHX-AD-1` -- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` - Database Admin Credential as `admin-password` -- Database Hostname Prefix as `host0130` +- Database Hostname Prefix as `host1234` - Target Data Storage Size in GBs as `512` - Oracle VMDB Shape as `VM.Standard2.1` -- SSH Public key for the DBCS system being deployed as `oci-publickey` -- OCID of the Subnet as 
`ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` Use the file: [scale_up_storage.yaml](./scale_up_storage.yaml) for this use case as below: 1. Deploy the .yaml file: ```sh -[root@test-server DBCS]# kubectl apply -f scale_storage.yaml +[root@test-server OBDS]# kubectl apply -f scale_up_storage.yaml dbcssystem.database.oracle.com/dbcssystem-existing configured ``` -2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB Scale up. +2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB Scale up. NOTE: Check the DB Operator Pod name in your environment. ``` -[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system +[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system ``` ## Sample Output -[Here](./scale_up_storage_sample_output.log) is the sample output for scaling up the storage of an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller with minimal parameters. +[Here](./scale_up_storage_sample_output.log) is the sample output for scaling up the storage of an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with minimal parameters. 
diff --git a/docs/dbcs/provisioning/scale_up_storage.yaml b/docs/dbcs/provisioning/scale_up_storage.yaml index 34e64b5e..a2977157 100644 --- a/docs/dbcs/provisioning/scale_up_storage.yaml +++ b/docs/dbcs/provisioning/scale_up_storage.yaml @@ -1,18 +1,19 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-existing spec: - id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" ociConfigMap: "oci-cred" ociSecret: "oci-privatekey" dbSystem: - availabilityDomain: "OLou:PHX-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" - dbAdminPaswordSecret: "admin-password" - hostName: "host0130" - initialDataStorageSizeInGB: 512 + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + hostName: "host1234" shape: "VM.Standard2.1" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + initialDataStorageSizeInGB: 512 sshPublicKeys: - - "oci-publickey" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" \ No newline at end of file diff --git a/docs/dbcs/provisioning/scale_up_storage_sample_output.log b/docs/dbcs/provisioning/scale_up_storage_sample_output.log index 667667b8..e703391e 100644 --- a/docs/dbcs/provisioning/scale_up_storage_sample_output.log +++ b/docs/dbcs/provisioning/scale_up_storage_sample_output.log @@ -3,387 +3,111 @@ Name: dbcssystem-existing Namespace: default Labels: Annotations: lastSuccessfulSpec: - 
{"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... -API Version: database.oracle.com/v1alpha1 + {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a","availabilityDomain":"O... +API Version: database.oracle.com/v4 Kind: DbcsSystem Metadata: - Creation Timestamp: 2022-03-08T23:27:48Z + Creation Timestamp: 2024-12-10T10:54:17Z Generation: 3 - Managed Fields: - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:dbSystem: - .: - f:availabilityDomain: - f:compartmentId: - f:dbAdminPaswordSecret: - f:hostName: - f:shape: - f:sshPublicKeys: - f:subnetId: - f:id: - f:ociConfigMap: - f:ociSecret: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-03-08T23:32:50Z - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:lastSuccessfulSpec: - f:spec: - f:dbSystem: - f:cpuCoreCount: - f:dbBackupConfig: - f:dbEdition: - f:dbName: - f:dbUniqueName: - f:dbVersion: - f:diskRedundancy: - f:displayName: - f:faultDomains: - f:nodeCount: - f:status: - .: - f:availabilityDomain: - f:cpuCoreCount: - f:dataStoragePercentage: - f:dataStorageSizeInGBs: - f:dbEdition: - f:dbInfo: - f:displayName: - f:id: - f:licenseModel: - f:network: - .: - f:clientSubnet: - f:domainName: - f:hostName: - f:listenerPort: - f:scanDnsName: - f:vcnName: - f:nodeCount: - f:recoStorageSizeInGB: - f:shape: - f:state: - f:storageManagement: - f:subnetId: - f:timeZone: - f:workRequests: - Manager: manager - Operation: Update - Time: 2022-03-08T23:32:55Z - Resource Version: 55214174 - UID: 96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 + Resource Version: 117788129 + UID: c9da1245-3582-4926-b311-c24d75e75003 Spec: Db System: - Availability Domain: OLou:PHX-AD-1 - 
Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya + Availability Domain: OLou:AP-MUMBAI-1-AD-1 + Compartment Id: ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a Db Admin Pasword Secret: admin-password - Host Name: host0130 - Shape: VM.Standard2.1 + Db Backup Config: + Domain: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Initial Data Storage Size In GB: 512 + Kms Config: + Shape: VM.Standard2.1 Ssh Public Keys: oci-publickey - Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - Oci Config Map: oci-cred + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Config: + Oci Config Map: oci-cred-mumbai Oci Secret: oci-privatekey Status: - Availability Domain: OLou:PHX-AD-1 + Availability Domain: OLou:AP-MUMBAI-1-AD-1 Cpu Core Count: 1 Data Storage Percentage: 80 - Data Storage Size In G Bs: 256 - Db Edition: ENTERPRISE_EDITION + Data Storage Size In G Bs: 512 + Db Clone Status: + Db Db Unique Name: + Host Name: + Db Edition: ENTERPRISE_EDITION_HIGH_PERFORMANCE Db Info: - Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq - Db Name: db0130 - Db Unique Name: db0130_phx1zn + Db Home Id: ocid1.dbhome.oc1.ap-mumbai-1.anrg6ljrqlb5nxiaoqqlaxhx4urdwmefw4il5efzekneuru4bpfv57i7iy6a + Db Name: cdb1 + Db Unique Name: cdb1_tkf_bom Db Workload: OLTP - Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra - Display Name: dbsystem20220308221302 - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - License Model: LICENSE_INCLUDED + Id: 
ocid1.database.oc1.ap-mumbai-1.anrg6ljrabf7htyalxin4xpiggjh4nxlta6o6iq56hjrlh4of2cq6c4qgrqa + Display Name: dbsystem1234 + Id: ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa + Kms Details Status: + License Model: BRING_YOUR_OWN_LICENSE Network: - Client Subnet: k8test-pubvcn - Domain Name: k8testpubvcn.k8test.oraclevcn.com - Host Name: host0130 - Listener Port: 1521 - Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com - Vcn Name: k8test - Node Count: 1 - Reco Storage Size In GB: 256 - Shape: VM.Standard2.1 - State: AVAILABLE - Storage Management: ASM - Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a - Time Zone: UTC + Client Subnet: oke-nodesubnet-quick-cluster1-2bebe95db-regional + Domain Name: subdda0b5eaa.cluster1.oraclevcn.com + Host Name: host1234 + Listener Port: 1521 + Scan Dns Name: host1234-scan.subdda0b5eaa.cluster1.oraclevcn.com + Vcn Name: oke-vcn-quick-cluster1-2bebe95db + Node Count: 1 + Pdb Details Status: + Pdb Config Status: + Pdb Name: cdb1_pdb1 + Pdb State: AVAILABLE + Pluggable Database Id: ocid1.pluggabledatabase.oc1.ap-mumbai-1.anrg6ljrabf7htyakgj4wuabus6z5kmalvob6r6b7vivkbsmmh7bjprzbuwa + Reco Storage Size In GB: 256 + Shape: VM.Standard2.1 + State: AVAILABLE + Storage Management: ASM + Subnet Id: ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq + Time Zone: UTC Work Requests: - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq - Operation Type: Create DB System - Percent Complete: 100 - Time Accepted: 2022-03-08 22:13:02.999 +0000 UTC - Time Finished: 2022-03-08 23:11:50.46 +0000 UTC - Time Started: 2022-03-08 22:13:16.995 +0000 UTC - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrc3fx6kwq4yxerk3ngztdbbngm7w4dnlddcdhxqxjn6e4kcyux5ca + Operation Id: 
ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrtpnjzjidageolva6ytlzjfb2lqhbbrivm4lsb67xyjzyyke6bt4a Operation Type: Update Shape Percent Complete: 100 - Time Accepted: 2022-03-08 23:33:42.807 +0000 UTC - Time Finished: 2022-03-08 23:46:21.126 +0000 UTC - Time Started: 2022-03-08 23:33:52.109 +0000 UTC - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljr5sveun3f6k3zuz23py7mm7jncmpq5vwyajbo5ezhc765347defwq - Operation Type: Update Shape + Time Accepted: 2024-12-10 08:57:53.547 +0000 UTC + Time Finished: 2024-12-10 09:14:04.572 +0000 UTC + Time Started: 2024-12-10 08:57:57.588 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxg7gov22vlcbqbnxrkl7t7xkcfya6w6gvck344jdf5vtqgw5wzgq + Operation Type: Update DB System Percent Complete: 100 - Time Accepted: 2022-03-09 00:25:03.644 +0000 UTC - Time Finished: 2022-03-09 00:38:59.526 +0000 UTC - Time Started: 2022-03-09 00:25:15.578 +0000 UTC -Events: -[root@docker-test-server test]# - - - -[root@docker-test-server test]# cat scale_up_storage.yaml -apiVersion: database.oracle.com/v1alpha1 -kind: DbcsSystem -metadata: - name: dbcssystem-existing -spec: - id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" - ociConfigMap: "oci-cred" - ociSecret: "oci-privatekey" - dbSystem: - availabilityDomain: "OLou:PHX-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" - dbAdminPaswordSecret: "admin-password" - hostName: "host0130" - initialDataStorageSizeInGB: 512 - shape: "VM.Standard2.1" - sshPublicKeys: - - "oci-publickey" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" -[root@docker-test-server test]# -[root@docker-test-server test]# kubectl apply -f scale_up_storage.yaml -dbcssystem.database.oracle.com/dbcssystem-existing configured -[root@docker-test-server test]# - - - -[root@docker-test-server test]# kubectl get ns - -kubectl get allNAME STATUS 
AGE -cert-manager Active 13d -default Active 139d -kube-node-lease Active 139d -kube-public Active 139d -kube-system Active 139d -oracle-database-operator-system Active 13d -shns Active 88d -[root@docker-test-server test]# -[root@docker-test-server test]# kubectl get all -n oracle-database-operator-system -NAME READY STATUS RESTARTS AGE -pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 3 13d -pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 3 13d -pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 4 13d - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 13d -service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 13d - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 13d - -NAME DESIRED CURRENT READY AGE -replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 13d -[root@docker-test-server test]# - - -[root@docker-test-server test]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-dlhls -n oracle-database-operator-system -. -. -2022-03-09T00:48:11.373Z INFO controller-runtime.manager.controller.dbcssystem OCI provider configured succesfully {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:48:15.961Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:49:16.273Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:50:16.557Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:51:16.910Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:52:17.277Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:53:17.600Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:54:18.189Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:55:18.506Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:56:18.862Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:57:19.180Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:58:19.544Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T00:59:19.870Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T01:00:20.230Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T01:01:20.663Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T01:02:21.303Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. {"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} -2022-03-09T01:03:21.690Z INFO controller-runtime.manager.controller.dbcssystem DB System current state is still:UPDATING. Sleeping for 60 seconds. 
{"reconciler group": "database.oracle.com", "reconciler kind": "DbcsSystem", "name": "dbcssystem-existing", "namespace": "default"} - - - - - -[root@docker-test-server test]# kubectl describe dbcssystems.database.oracle.com dbcssystem-existing -Name: dbcssystem-existing -Namespace: default -Labels: -Annotations: lastSuccessfulSpec: - {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... -API Version: database.oracle.com/v1alpha1 -Kind: DbcsSystem -Metadata: - Creation Timestamp: 2022-03-08T23:27:48Z - Generation: 4 - Managed Fields: - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:lastSuccessfulSpec: - f:spec: - f:dbSystem: - f:cpuCoreCount: - f:dbBackupConfig: - f:dbEdition: - f:dbName: - f:dbUniqueName: - f:dbVersion: - f:diskRedundancy: - f:displayName: - f:faultDomains: - f:nodeCount: - f:status: - .: - f:availabilityDomain: - f:cpuCoreCount: - f:dataStoragePercentage: - f:dataStorageSizeInGBs: - f:dbEdition: - f:dbInfo: - f:displayName: - f:id: - f:licenseModel: - f:network: - .: - f:clientSubnet: - f:domainName: - f:hostName: - f:listenerPort: - f:scanDnsName: - f:vcnName: - f:nodeCount: - f:recoStorageSizeInGB: - f:shape: - f:state: - f:storageManagement: - f:subnetId: - f:timeZone: - f:workRequests: - Manager: manager - Operation: Update - Time: 2022-03-08T23:32:55Z - API Version: database.oracle.com/v1alpha1 - Fields Type: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: - f:kubectl.kubernetes.io/last-applied-configuration: - f:spec: - .: - f:dbSystem: - .: - f:availabilityDomain: - f:compartmentId: - f:dbAdminPaswordSecret: - f:hostName: - f:initialDataStorageSizeInGB: - f:shape: - f:sshPublicKeys: - f:subnetId: - f:id: - f:ociConfigMap: - f:ociSecret: - Manager: kubectl-client-side-apply - Operation: Update - Time: 2022-03-09T00:48:11Z - Resource Version: 55222013 - UID: 
96d7bc49-33e9-42cc-8dd0-ada9a5a4c7e5 -Spec: - Db System: - Availability Domain: OLou:PHX-AD-1 - Compartment Id: ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya - Db Admin Pasword Secret: admin-password - Host Name: host0130 - Initial Data Storage Size In GB: 512 - Shape: VM.Standard2.1 - Ssh Public Keys: - oci-publickey - Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - Oci Config Map: oci-cred - Oci Secret: oci-privatekey -Status: - Availability Domain: OLou:PHX-AD-1 - Cpu Core Count: 1 - Data Storage Percentage: 80 - Data Storage Size In G Bs: 512 - Db Edition: ENTERPRISE_EDITION - Db Info: - Db Home Id: ocid1.dbhome.oc1.phx.anyhqljr5gy3jhqat52milqwt3gq6lwohhacwg5yi4mtzq7c7hag53lrkugq - Db Name: db0130 - Db Unique Name: db0130_phx1zn - Db Workload: OLTP - Id: ocid1.database.oc1.phx.anyhqljrabf7htyackgmsaqjfexoqgrzuuk33ju2q25z2al43tnd5mhhvkra - Display Name: dbsystem20220308221302 - Id: ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa - License Model: LICENSE_INCLUDED - Network: - Client Subnet: k8test-pubvcn - Domain Name: k8testpubvcn.k8test.oraclevcn.com - Host Name: host0130 - Listener Port: 1521 - Scan Dns Name: host0130-scan.k8testpubvcn.k8test.oraclevcn.com - Vcn Name: k8test - Node Count: 1 - Reco Storage Size In GB: 256 - Shape: VM.Standard2.1 - State: AVAILABLE - Storage Management: ASM - Subnet Id: ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a - Time Zone: UTC - Work Requests: - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrxivzvgzel47zuoyke5yk36o7mrgjl27vscd5z3bqptmyh3rxwbqq + Time Accepted: 2024-12-10 08:57:43.701 +0000 UTC + Time Finished: 2024-12-10 09:14:22.705 +0000 UTC + Time Started: 2024-12-10 08:57:53.873 +0000 UTC + Operation Id: 
ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrk2efvqjda2t7k5iaerahw7wcyz5dq2zev2k55gmq2gvsjkui7hxq Operation Type: Create DB System Percent Complete: 100 - Time Accepted: 2022-03-08 22:13:02.999 +0000 UTC - Time Finished: 2022-03-08 23:11:50.46 +0000 UTC - Time Started: 2022-03-08 22:13:16.995 +0000 UTC - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrc3fx6kwq4yxerk3ngztdbbngm7w4dnlddcdhxqxjn6e4kcyux5ca - Operation Type: Update Shape + Time Accepted: 2024-12-10 05:19:52.499 +0000 UTC + Time Finished: 2024-12-10 07:59:19.083 +0000 UTC + Time Started: 2024-12-10 05:19:55.747 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr4qmf6rdtcbrc5p2q7bev3igugtpgfbwc2laht22yyjzr2srrg7vq + Operation Type: Update DB System Percent Complete: 100 - Time Accepted: 2022-03-08 23:33:42.807 +0000 UTC - Time Finished: 2022-03-08 23:46:21.126 +0000 UTC - Time Started: 2022-03-08 23:33:52.109 +0000 UTC - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljr5sveun3f6k3zuz23py7mm7jncmpq5vwyajbo5ezhc765347defwq + Time Accepted: 2024-12-10 10:57:27.313 +0000 UTC + Time Finished: 2024-12-10 11:15:50.597 +0000 UTC + Time Started: 2024-12-10 10:57:45.242 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr2vehqv3vgrxr5mrmd6hoqxg2zr6m5eaunv3ip6bcrubcpvhudmia Operation Type: Update Shape Percent Complete: 100 - Time Accepted: 2022-03-09 00:25:03.644 +0000 UTC - Time Finished: 2022-03-09 00:38:59.526 +0000 UTC - Time Started: 2022-03-09 00:25:15.578 +0000 UTC - Operation Id: ocid1.coreservicesworkrequest.oc1.phx.abyhqljrbaqah6qktukvdlnx66fp2hlevegryfuppsshkqemfcdjtwfwaq3q + Time Accepted: 2024-12-10 10:57:44.95 +0000 UTC + Time Finished: 2024-12-10 11:15:40.364 +0000 UTC + Time Started: 2024-12-10 10:57:54.082 +0000 UTC + Operation Id: ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljr36bt7ot5oq3otch4bu2axn3azkicot4zuwgwmxeupxr4siisydja + Operation Type: Scale Storage + Percent Complete: 100 + Time Accepted: 
2024-12-10 11:44:49.369 +0000 UTC
+    Time Finished:   2024-12-10 11:58:45.01 +0000 UTC
+    Time Started:    2024-12-10 11:44:55.544 +0000 UTC
+    Operation Id:       ocid1.coreservicesworkrequest.oc1.ap-mumbai-1.abrg6ljrxdpmmaipuqke5yx3szyfnf2zwkfptz3jevlq3coicecfjihnm4kq
     Operation Type:     Scale Storage
     Percent Complete:   100
-    Time Accepted:      2022-03-09 00:48:54.849 +0000 UTC
-    Time Finished:      2022-03-09 01:03:10.885 +0000 UTC
-    Time Started:       2022-03-09 00:49:05.911 +0000 UTC
-Events: 
-[root@docker-test-server test]# 
\ No newline at end of file
+    Time Accepted:   2024-12-10 11:44:55.255 +0000 UTC
+    Time Finished:   2024-12-10 11:58:25.229 +0000 UTC
+    Time Started:    2024-12-10 11:44:57.743 +0000 UTC
+Events: 
\ No newline at end of file
diff --git a/docs/dbcs/provisioning/terminate_dbcs_system.md b/docs/dbcs/provisioning/terminate_dbcs_system.md
index 071cda30..f3b19cbc 100644
--- a/docs/dbcs/provisioning/terminate_dbcs_system.md
+++ b/docs/dbcs/provisioning/terminate_dbcs_system.md
@@ -1,15 +1,15 @@
-# Terminate an existing DBCS System
+# Terminate an existing Oracle Base Database System (OBDS)
 
-In this use case, an existing OCI DBCS system deployed earlier is terminated using Oracle DB Operator DBCS controller. Its a 2 Step operation.
+In this use case, an existing OCI OBDS system deployed earlier is terminated using Oracle DB Operator OBDS controller. It's a 2-step operation.
 
-In order to terminate an existing DBCS system, the steps will be:
+In order to terminate an existing OBDS system, the steps will be:
 
-1. Bind the existing DBCS System to DBCS Controller.
-2. Apply the change to terminate this DBCS System.
+1. Bind the existing OBDS System to OBDS Controller.
+2. Apply the change to terminate this OBDS System.
 
 **NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. 
-This example uses `terminate_dbcs_system.yaml` to terminated a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with:
+This example uses `terminate_dbcs_system.yaml` to terminate a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with:
 
 - OCID of existing VMDB as `ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa`
 - OCI Configmap as `oci-cred`
@@ -21,26 +21,26 @@ Use the file: [terminate_dbcs_system.yaml](./terminate_dbcs_system.yaml) for thi
 1. Deploy the .yaml file:
 
 ```sh
-[root@test-server DBCS]# kubectl apply -f terminate_dbcs_system.yaml
+[root@test-server OBDS]# kubectl apply -f terminate_dbcs_system.yaml
 dbcssystem.database.oracle.com/dbcssystem-terminate created
 
-[root@test-server DBCS]# kubectl delete -f terminate_dbcs_system.yaml
+[root@test-server OBDS]# kubectl delete -f terminate_dbcs_system.yaml
 dbcssystem.database.oracle.com "dbcssystem-terminate" deleted
 ```
 
 2. Check the logs of Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for an update on the terminate operation been accepted. 
 
 ```
-[root@test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system
+[root@test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system
 ```
 
-3. Check and confirm if the existing OCI DBCS system is NO longer available after sometime because of termination:
+3. 
Check and confirm if the existing OCI OBDS system is no longer available after some time because of termination:
 
 ```
-[root@test-server DBCS]# kubectl describe dbcssystems.database.oracle.com dbcssystem-terminate
+[root@test-server OBDS]# kubectl describe dbcssystems.database.oracle.com dbcssystem-terminate
 ```
 
 ## Sample Output
 
-[Here](./terminate_dbcs_system_sample_output.log) is the sample output for terminating an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller with minimal parameters.
+[Here](./terminate_dbcs_system_sample_output.log) is the sample output for terminating an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller with minimal parameters.
diff --git a/docs/dbcs/provisioning/terminate_dbcs_system.yaml b/docs/dbcs/provisioning/terminate_dbcs_system.yaml
index 075ce54e..a4a2f105 100644
--- a/docs/dbcs/provisioning/terminate_dbcs_system.yaml
+++ b/docs/dbcs/provisioning/terminate_dbcs_system.yaml
@@ -1,4 +1,4 @@
-apiVersion: database.oracle.com/v1alpha1
+apiVersion: database.oracle.com/v4
 kind: DbcsSystem
 metadata:
   name: dbcssystem-terminate
diff --git a/docs/dbcs/provisioning/terminate_dbcs_system_sample_output.log b/docs/dbcs/provisioning/terminate_dbcs_system_sample_output.log
index ff8afc96..383f823c 100644
--- a/docs/dbcs/provisioning/terminate_dbcs_system_sample_output.log
+++ b/docs/dbcs/provisioning/terminate_dbcs_system_sample_output.log
@@ -4,13 +4,13 @@ Namespace:    default
 Labels:       
 Annotations:  lastSuccessfulSpec:
                 {"dbSystem":{"compartmentId":"ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya","availabilityDomain":"O... 
-API Version: database.oracle.com/v1alpha1 +API Version: database.oracle.com/v4 Kind: DbcsSystem Metadata: Creation Timestamp: 2022-03-08T23:27:48Z Generation: 5 Managed Fields: - API Version: database.oracle.com/v1alpha1 + API Version: database.oracle.com/v4 Fields Type: FieldsV1 fieldsV1: f:metadata: @@ -58,7 +58,7 @@ Metadata: Manager: manager Operation: Update Time: 2022-03-08T23:32:55Z - API Version: database.oracle.com/v1alpha1 + API Version: database.oracle.com/v4 Fields Type: FieldsV1 fieldsV1: f:metadata: @@ -164,7 +164,7 @@ Events: [root@docker-test-server test]# cat terminate_dbcs_system.yaml -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: DbcsSystem metadata: name: dbcssystem-terminate diff --git a/docs/dbcs/provisioning/update_license.md b/docs/dbcs/provisioning/update_license.md index 7c7c43b7..6f32c31b 100644 --- a/docs/dbcs/provisioning/update_license.md +++ b/docs/dbcs/provisioning/update_license.md @@ -1,27 +1,27 @@ -# Update License type of an existing DBCS System +# Update License type of an existing OBDS System -In this use case, the license type of an existing OCI DBCS system deployed earlier is changed from `License Included` to `Bring your own license` using Oracle DB Operator DBCS controller. Its a 2 Step operation. +In this use case, the license type of an existing OCI OBDS system deployed earlier is changed from `License Included` to `Bring your own license` using Oracle DB Operator OBDS controller. Its a 2 Step operation. -In order to update the license type an existing DBCS system, the steps will be: +In order to update the license type an existing OBDS system, the steps will be: -1. Bind the existing DBCS System to DBCS Controller. +1. Bind the existing OBDS System to OBDS Controller. 2. Apply the change to change its license type. 
**NOTE** We are assuming that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-deploy-a-dbcs-system-using-oracle-db-operator-dbcs-controller) steps to create the configmap and the secrets required during the deployment. -This example uses `update_license.yaml` to change the license type of a Single Instance DBCS VMDB using Oracle DB Operator DBCS Controller with: +This example uses `update_license.yaml` to change the license type of a Single Instance OBDS VMDB using Oracle DB Operator OBDS Controller with: -- OCID of existing VMDB as `ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa` +- OCID of existing VMDB as `ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa` - OCI Configmap as `oci-cred` - OCI Secret as `oci-privatekey` -- Availability Domain for the DBCS VMDB as `OLou:PHX-AD-1` -- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya` -- Database Admin Credential as `admin-password` -- Database Hostname Prefix as `host0130` +- Availability Domain for the OBDS VMDB as `OLou:AP-MUMBAI-1-AD-1` - Target license model as `BRING_YOUR_OWN_LICENSE` +- Compartment OCID as `ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a` +- Database Admin Credential as `admin-password` +- Database Hostname Prefix as `host1234` - Oracle VMDB Shape as `VM.Standard2.1` -- SSH Public key for the DBCS system being deployed as `oci-publickey` -- OCID of the Subnet as `ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a` +- SSH Public key for the OBDS system being deployed as `oci-publickey` +- OCID of the Subnet as `ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq` **NOTE:** For the details of the parameters to be used in the .yaml file, please refer [here](./dbcs_controller_parameters.md). 
@@ -29,18 +29,18 @@ Use the file: [update_license.yaml](./update_license.yaml) for this use case as
 1. Deploy the .yaml file:
 
 ```sh
-[root@test-server DBCS]# kubectl apply -f update_license.yaml
+[root@test-server OBDS]# kubectl apply -f update_license.yaml
 dbcssystem.database.oracle.com/dbcssystem-existing configured
 ```
 
-2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the DBCS VMDB Scale up.
+2. Monitor the Oracle DB Operator Pod `pod/oracle-database-operator-controller-manager-665874bd57-g2cgw` for the progress of the OBDS VMDB license update.
 
 NOTE: Check the DB Operator Pod name in your environment.
 
 ```
-[root@docker-test-server DBCS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system
+[root@docker-test-server OBDS]# kubectl logs -f pod/oracle-database-operator-controller-manager-665874bd57-g2cgw -n oracle-database-operator-system
 ```
 
 ## Sample Output
 
-[Here](./update_license_sample_output.log) is the sample output for updating the license type an existing DBCS System deployed in OCI using Oracle DB Operator DBCS Controller.
+[Here](./update_license_sample_output.log) is the sample output for updating the license type of an existing OBDS System deployed in OCI using Oracle DB Operator OBDS Controller. 
diff --git a/docs/dbcs/provisioning/update_license.yaml b/docs/dbcs/provisioning/update_license.yaml index 1fb54a64..7c192b6b 100644 --- a/docs/dbcs/provisioning/update_license.yaml +++ b/docs/dbcs/provisioning/update_license.yaml @@ -1,18 +1,20 @@ -apiVersion: database.oracle.com/v1alpha1 -kind: DbcsSystem -metadata: - name: dbcssystem-existing -spec: - id: "ocid1.dbsystem.oc1.phx.anyhqljrabf7htyanr3lnp6wtu5ld7qwszohiteodvwahonr2yymrftarkqa" - dbSystem: - availabilityDomain: "OLou:PHX-AD-1" - compartmentId: "ocid1.compartment.oc1..aaaaaaaa4hecw2shffuuc4fcatpin4x3rdkesmmf4he67osupo7g6f7i6eya" - dbAdminPaswordSecret: "admin-password" - hostName: "host0130" - licenseModel: "BRING_YOUR_OWN_LICENSE" - shape: "VM.Standard2.1" - sshPublicKeys: - - "oci-publickey" - subnetId: "ocid1.subnet.oc1.phx.aaaaaaaauso243tymnzeh6zbz5vkejgyu4ugujul5okpa5xbaq3275izbc7a" - ociConfigMap: "oci-cred" - ociSecret: "oci-privatekey" + apiVersion: database.oracle.com/v4 + kind: DbcsSystem + metadata: + name: dbcssystem-existing + spec: + id: "ocid1.dbsystem.oc1.ap-mumbai-1.anrg6ljrabf7htyadgsso7aessztysrwaj5gcl3tp7ce6asijm2japyvmroa" + ociConfigMap: "oci-cred" + ociSecret: "oci-privatekey" + dbSystem: + availabilityDomain: "OLou:AP-MUMBAI-1-AD-1" + compartmentId: "ocid1.compartment.oc1..aaaaaaaa63yqilqhgxv3dszur3a2fgwc64ohpfy43vpqjm7q5zq4q4yaw72a" + dbAdminPasswordSecret: "admin-password" + hostName: "host1234" + licenseModel: "BRING_YOUR_OWN_LICENSE" + shape: "VM.Standard2.1" + domain: "subdda0b5eaa.cluster1.oraclevcn.com" + initialDataStorageSizeInGB: 512 + sshPublicKeys: + - "oci-publickey" + subnetId: "ocid1.subnet.oc1.ap-mumbai-1.aaaaaaaa5zpzfax66omtbmjwlv4thruyru7focnu7fjcjksujmgwmr6vpbvq" diff --git a/docs/multitenant/README.md b/docs/multitenant/README.md index 6c9a6756..0d3057fc 100644 --- a/docs/multitenant/README.md +++ b/docs/multitenant/README.md @@ -1,254 +1,11 @@ - +# Multitenant Controllers -# Oracle Multitenant Database Controllers +Starting from OraOperator version 1.2.0, 
there are two classes of multitenant controllers: one based on [ORDS](https://www.oracle.com/uk/database/technologies/appdev/rest.html) and another based on a dedicated REST server for the operator, called LREST. In both cases, the features remains unchanged (a part from CRD name changes). A pod running a REST server (either LREST or ORDS) acts as the proxy server connected to the container database (CDB) for all incoming kubectl requests. We plan to discontinue the ORDS based controller, in the next release; no regression (a part form CRD name changes). -The Oracle Database Operator for kubernetes uses two controllers to manage [Pluggable Database life cycle][oradocpdb] +## What are the differences -- CDB controller -- PDB controller - -By usigng CDB/PDB controllers you can perform the following actions **CREATE**,**MODIFY(OPEN/COSE)**,**DELETE**,**CLONE**,**PLUG** and **UNPLUG** - -This file examplains how to setup CDB and PDB controllers, additional details can be found in the README files under usecases directories.. - -- [Usecase01][uc01] pdb crd and cdb pod are running in the same namesaoce -- [Usecase02][uc02] unplug and plug operation examples -- [Usecase03][uc03] multiple namespace example cdb pod ,pdb crd and pod operator are running in different namespaces. - -> **NOTE** that there is no controller for Container Database Operations - -## Macro steps for setup - -- Deply the Oracle Database Operator -- Create Ords based image for CDB pod -- Container DB user creation -- Create secrets for credentials -- Create certificates for https connection -- Create CDB pod - -## Oracle DB Operator Multitenant Database Controller Deployment - -To deploy OraOperator, use this [Oracle Database Operator for Kubernetes](https://github.com/oracle/oracle-database-operator/blob/main/README.md) step-by-step procedure. - -After the Oracle Database Operator is deployed, you can see the DB Operator Pods running in the Kubernetes Cluster. 
As part of the `OraOperator` deployment, the multitenant Database Controller is deployed. You can see the CRDs (Custom Resource Definition) for the CDB and PDBs in the list of CRDs. The following output is an example of such a deployment: - -```bash -[root@test-server oracle-database-operator]# kubectl get ns -NAME STATUS AGE -cert-manager Active 32h -default Active 245d -kube-node-lease Active 245d -kube-public Active 245d -kube-system Active 245d -oracle-database-operator-system Active 24h <<<< namespace to deploy the Oracle Database Operator - -[root@test-server oracle-database-operator]# kubectl get all -n oracle-database-operator-system -NAME READY STATUS RESTARTS AGE -pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 0 28s -pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 0 28s -pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 0 28s - -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 29s -service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 29s - -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 29s - -NAME DESIRED CURRENT READY AGE -replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 29s -[root@docker-test-server oracle-database-operator]# - -[root@test-server oracle-database-operator]# kubectl get crd -NAME CREATED AT -autonomouscontainerdatabases.database.oracle.com 2022-06-22T01:21:36Z -autonomousdatabasebackups.database.oracle.com 2022-06-22T01:21:36Z -autonomousdatabaserestores.database.oracle.com 2022-06-22T01:21:37Z -autonomousdatabases.database.oracle.com 2022-06-22T01:21:37Z -cdbs.database.oracle.com 2022-06-22T01:21:37Z <<<< -certificaterequests.cert-manager.io 2022-06-21T17:03:46Z -certificates.cert-manager.io 2022-06-21T17:03:47Z -challenges.acme.cert-manager.io 
2022-06-21T17:03:47Z -clusterissuers.cert-manager.io 2022-06-21T17:03:48Z -dbcssystems.database.oracle.com 2022-06-22T01:21:38Z -issuers.cert-manager.io 2022-06-21T17:03:49Z -oraclerestdataservices.database.oracle.com 2022-06-22T01:21:38Z -orders.acme.cert-manager.io 2022-06-21T17:03:49Z -pdbs.database.oracle.com 2022-06-22T01:21:39Z <<<< -shardingdatabases.database.oracle.com 2022-06-22T01:21:39Z -singleinstancedatabases.database.oracle.com 2022-06-22T01:21:40Z -``` - - -## Prerequsites to manage PDB Life Cycle using Oracle DB Operator Multitenant Database Controller - -* [Prepare the container database for PDB Lifecycle Management or PDB-LM](#prepare-cdb-for-pdb-lifecycle-management-pdb-lm) -* [Oracle REST Data Service or ORDS Image](#oracle-rest-data-service-ords-image) -* [Kubernetes Secrets](#kubernetes-secrets) -* [Kubernetes CRD for CDB](#kubernetes-crd-for-cdb) -* [Kubernetes CRD for PDB](#kubernetes-crd-for-pdb) - -## Prepare the container database for PDB Lifecycle Management (PDB-LM) - -Pluggable Database (PDB) management operations are performed in the Container Database (CDB). These operations include create, clone, plug, unplug, delete, modify and map pdb. - -You cannot have an ORDS-enabled schema in the container database. To perform the PDB lifecycle management operations, you must first use the following steps to define the default CDB administrator credentials on target CDBs: - -Create the CDB administrator user, and grant the required privileges. In this example, the user is `C##DBAPI_CDB_ADMIN`. However, any suitable common user name can be used. 
- -```SQL -SQL> conn /as sysdba - --- Create following users at the database level: - -ALTER SESSION SET "_oracle_script"=true; -DROP USER C##DBAPI_CDB_ADMIN cascade; -CREATE USER C##DBAPI_CDB_ADMIN IDENTIFIED BY CONTAINER=ALL ACCOUNT UNLOCK; -GRANT SYSOPER TO C##DBAPI_CDB_ADMIN CONTAINER = ALL; -GRANT SYSDBA TO C##DBAPI_CDB_ADMIN CONTAINER = ALL; -GRANT CREATE SESSION TO C##DBAPI_CDB_ADMIN CONTAINER = ALL; - - --- Verify the account status of the following usernames. They should not be in locked status: - -col username for a30 -col account_status for a30 -select username, account_status from dba_users where username in ('ORDS_PUBLIC_USER','C##DBAPI_CDB_ADMIN','APEX_PUBLIC_USER','APEX_REST_PUBLIC_USER'); -``` - -## OCI OKE(Kubernetes Cluster) - -You can use an [OKE in Oracle Cloud Infrastructure][okelink] to configure the operator for PDB lifecycle management. **Note that there is no restriction about container database location; it can be anywhere (on cloud or premises , on any supported platform).** -To quickly create an OKE cluster in your OCI cloud environment you can use the following [link](./provisioning/quickOKEcreation.md). -In this setup example [provisioning example setup](./provisioning/example_setup_using_oci_oke_cluster.md), the Container database is running on a OCI Exadata Database Cluster. - - -## Oracle REST Data Service (ORDS) Image - - The PDB Database controllers require a pod running a dedicated rest server image based on [ORDS][ordsdoc]. Read the following [link](./provisioning/ords_image.md) to build the ords images. - - -## Kubernetes Secrets - - Multitenant Controllers use Kubernetes Secrets to store the required credential. The https certificates are stored in Kubernetes Secrets as well. 
- - **Note** In multi namespace enviroment you have to create specific secrets for each namespaces - - **Note** In multi namespace enviroment you have to create specific secrets for each namespaces - - **Note** In multi namespace enviroment you have to create specific secrets for each namespaces - -### Secrets for CDB CRD - - Create a secret file as shown here: [config/samples/multitenant/cdb_secret.yaml](../multitenant/provisioning/singlenamespace/cdb_create.yaml). Modify this file with the `base64` encoded values of the required passwords for CDB, and use this file to create the required secrets. - - ```bash - kubectl apply -f cdb_secret.yaml - ``` - - **Note:** To obtain the `base64` encoded value for a password, use the following command: - - ```bash - echo -n "" | base64 - ``` - - **Note:** After successful creation of the CDB Resource, the CDB secrets are deleted from the Kubernetes system . - -### Secrets for PDB CRD - - Create a secret file as shown here: [pdb_secret.yaml](../multitenant/provisioning/singlenamespace/pdb_secret.yaml). Edit the file using your base64 credential and apply it. - - ```bash - kubectl apply -f pdb_secret.yaml - ``` - - **NOTE:** Don't leave plaintext files containing sensitive data on disk. After loading the Secret, remove the plaintext file or move it to secure storage. - -### Secrets for CERTIFICATES - -Create the certificates and key on your local host, and use them to create the Kubernetes secret. 
- -```bash -openssl genrsa -out ca.key 2048 -openssl req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords /CN=localhost Root CA " -out ca.crt -openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords /CN=localhost" -out server.csr -echo "subjectAltName=DNS:cdb-dev-ords,DNS:www.example.com" > extfile.txt -openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt -``` - -```bash -kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n oracle-database-operator-system -kubectl create secret generic db-ca --from-file=ca.crt -n oracle-database-operator-system -``` - -image_not_found - -**Note:** On successful creation of the certificates secret creation remove files or move to secure storage . - -## Kubernetes CRD for CDB - -The Oracle Database Operator Multitenant Controller creates the CDB kind as a custom resource that models a target CDB as a native Kubernetes object. This kind is used only to create Pods to connect to the target CDB to perform PDB-LM operations. Each CDB resource follows the CDB CRD as defined here: [config/crd/bases/database.oracle.com_cdbs.yaml](../../config/crd/bases/database.oracle.com_cdbs.yaml) - -To create a CDB CRD, see this example `.yaml` file: [cdb_create.yaml](../multitenant/provisioning/singlenamespace/cdb_create.yaml) - -**Note:** The password and username fields in this *cdb.yaml* Yaml are the Kubernetes Secrets created earlier in this procedure. For more information, see the section [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/). To understand more about creating secrets for pulling images from a Docker private registry, see [Kubernetes Private Registry Documenation]( https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). 
- -Create a CDB CRD Resource example - -```bash -kubectl apply -f cdb_create.yaml -``` - -see [usecase01][uc01] and usecase03[uc03] for more information about file configuration - -## Kubernetes CRD for PDB - -The Oracle Database Operator Multitenant Controller creates the PDB kind as a custom resource that models a PDB as a native Kubernetes object. There is a one-to-one mapping between the actual PDB and the Kubernetes PDB Custom Resource. You cannot have more than one Kubernetes resource for a target PDB. This PDB resource can be used to perform PDB-LM operations by specifying the action attribute in the PDB Specs. Each PDB resource follows the PDB CRD as defined here: [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) - -To create a PDB CRD Resource, a sample .yaml file is available here: [pdb_create.yaml](../multitenant/provisioning/singlenamespace/pdb_create.yaml) - -```bash -kubectl apply -f cdb_create.yaml -``` - -## Usecases files list - -### Single Namespace - -1. [Create CDB](./provisioning/singlenamespace/cdb_create.yaml) -2. [Create PDB](./provisioning/singlenamespace/pdb_create.yaml) -3. [Clone PDB](./provisioning/singlenamespace/pdb_clone.yaml) -4. [Open PDB](./provisioning/singlenamespace/pdb_open.yaml) -4. [Close PDB](./provisioning/singlenamespace/pdb_close.yaml) -5. [Delete PDB](./provisioning/singlenamespace/pdb_delete.yaml) -6. [Unplug PDB](./provisioning/singlenamespace/pdb_unplug.yaml) -7. [Plug PDB](./provisioning/singlenamespace/pdb_plug.yaml) - -### Multiple namespace (cdbnamespace,dbnamespace) - -1. [Create CDB](./provisioning/multinamespace/cdb_create.yaml) -2. [Create PDB](./provisioning/multinamespace/pdb_create.yaml) -3. [Clone PDB](./provisioning/multinamespace/pdb_clone.yaml) -4. [Open PDB](./provisioning/multinamespace/pdb_open.yaml) -4. [Close PDB](./provisioning/multinamespace/pdb_close.yaml) -5. [Delete PDB](./provisioning/multinamespace/pdb_delete.yaml) -6. 
[Unplug PDB](./provisioning/multinamespace/pdb_unplug.yaml) - -## Known issues - - - Ords installatian failure if pluaggable databases in the container db are not opened - - - Version 1.1.0: encoded password for https authentication may include carriege return as consequence the https request fails with http 404 error. W/A generate encoded password using **printf** instead of **echo**. - - - pdb controller authentication suddenly failes without any system change. Check the certificate expiration date **openssl .... -days 365** - - - Nothing happens after cdb yaml file applying: Make sure to have properly configure the WHATCH_NAMESPACE list in the operator yaml file - - [okelink]:https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm - [ordsdoc]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/23.1/index.html - [uc01]:../multitenant/usecase01/README.md - [uc02]:../multitenant/usecase02/README.md - [uc03]:../multitenant/usecase03/README.md - [oradocpdb]:https://docs.oracle.com/en/database/oracle/oracle-database/21/multi/introduction-to-the-multitenant-architecture.html#GUID-AB84D6C9-4BBE-4D36-992F-2BB85739329F - - \ No newline at end of file +- Regarding the YAML file, the parameters for the existing functionalities are unchanged. +- The **CRD** names are different: for controllers based on [ORDS](./ords-based/README.md), we have **PDB** and **CDB**, while for controllers based on [LREST](./lrest-based/README.md), we have **LRPDB** and **LREST**. +- If you use an LREST-based controller, there is no need to manually create the REST server pod. The image is available for download on OCR. +- Controllers based on **LREST** allow you to manage PDB parameters using kubectl. +- ORDS controllers currently do not support ORDS version 24.1. 
diff --git a/docs/multitenant/lrest-based/README.md b/docs/multitenant/lrest-based/README.md new file mode 100644 index 00000000..f17abd41 --- /dev/null +++ b/docs/multitenant/lrest-based/README.md @@ -0,0 +1,500 @@ + + + +# LREST BASED MULTITENANT CONTROLLERS FOR PDB LIFE CYCLE MANAGEMENT + + +- [LREST BASED MULTITENANT CONTROLLERS FOR PDB LIFE CYCLE MANAGEMENT](#lrest-based-multitenant-controllers-for-pdb-life-cycle-management) + - [STEP BY STEP CONFIGURATION](#step-by-step-configuration) + - [Multiple namespace setup](#multiple-namespace-setup) + - [Create the operator](#create-the-operator) + - [Container database setup](#container-database-setup) + - [Apply rolebinding](#apply-rolebinding) + - [Certificate and credentials](#certificate-and-credentials) + - [Private key 🔑](#private-key-) + - [Public Key 🔑](#public-key-) + - [Certificates](#certificates) + - [Create secrets for certificate and keys](#create-secrets-for-certificate-and-keys) + - [Create secrets with encrypted password](#create-secrets-with-encrypted-password) + - [Create lrest pod](#create-lrest-pod) + - [Create PDB](#create-pdb) + - [pdb config map ](#pdb-config-map) + - [Open PDB](#open-pdb) + - [Close PDB](#close-pdb) + - [Clone PDB](#clone-pdb) + - [Unplug PDB](#unplug-pdb) + - [Plug PDB](#plug-pdb) + - [Delete PDB](#delete-pdb) + - [Map PDB](#map-pdb) + + + + + +**Lrpdb** and **lrest** are two controllers for PDB lifecycle management (**PDBLCM**). They rely on a dedicated REST server (Lite Rest Server) Container image to run. The `lrest` controller is available on the Oracle Container Registry (OCR). The container database can be anywhere (on-premises or in the Cloud). + +![generaleschema](./images/Generalschema2.jpg) + +## STEP BY STEP CONFIGURATION +Complete each of these steps in the order given. 
+
+### Multiple namespace setup
+
+Before proceeding with controllers setup, ensure that the Oracle Database Operator (operator) is configured to work with multiple namespaces, as specified in the [README](../../../README.md).
+In this document, each controller is running in a dedicated namespace: the lrest controller is running in **cdbnamespace**, the lrpdb controller is running in **pdbnamespace**. The [usecase directory](./usecase/README.md) contains all the files reported in this document.
+
+Configure the **WATCH_NAMESPACE** list of the operator `yaml` file
+
+```bash
+sed -i 's/value: ""/value: "oracle-database-operator-system,pdbnamespace,cdbnamespace"/g' oracle-database-operator.yaml
+```
+
+### Create the operator
+Run the following command:
+
+```bash
+kubectl apply -f oracle-database-operator.yaml
+```
+Check the controller:
+```bash
+kubectl get pods -n oracle-database-operator-system
+NAME READY STATUS RESTARTS AGE
+oracle-database-operator-controller-manager-796c9b87df-6xn7c 1/1 Running 0 22m
+oracle-database-operator-controller-manager-796c9b87df-sckf2 1/1 Running 0 22m
+oracle-database-operator-controller-manager-796c9b87df-t4qns 1/1 Running 0 22m
+```
+### Container database setup
+
+On the container database, use the following commands to configure the account for PDB administration:
+
+```sql
+alter session set "_oracle_script"=true;
+create user <cdbadminuser> identified by <password>;
+grant create session to <cdbadminuser> container=all;
+grant sysdba to <cdbadminuser> container=all;
+```
+
+
+### Apply rolebinding
+
+
+Apply the following files: [`pdbnamespace_binding.yaml`](./usecase/pdbnamespace_binding.yaml) [`cdbnamespace_binding.yaml`](./usecase/cdbnamespace_binding.yaml)
+```bash
+kubectl apply -f pdbnamespace_binding.yaml
+kubectl apply -f cdbnamespace_binding.yaml
+```
+
+### Certificate and credentials
+You must create the public key, private key, certificates and Kubernetes Secrets for the security configuration.
+
+#### Private key 🔑
+> Note: Only the private key **PKCS#8** format is supported by LREST controllers. Before you start configuration, ensure that you can use it. If you are using [`openssl3`](https://docs.openssl.org/master/) then `pkcs8` is generated by default. If it is not already generated, then use the following command to create a `pkcs8` private key
+
+```bash
+openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 -out private.key
+```
+#### Public Key 🔑
+Create the public key.
+
+```bash
+/usr/bin/openssl rsa -in private.key -outform PEM -pubout -out public.pem
+```
+#### Certificates
+Create certificates.
+```bash
+openssl req -new -x509 -days 365 -key private.key -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=oracle Root CA" -out ca.crt
+```
+```bash
+openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=cdb-dev-lrest.cdbnamespace" -out server.csr
+```
+```bash
+/usr/bin/echo "subjectAltName=DNS:cdb-dev-lrest.cdbnamespace,DNS:www.example.com" > extfile.txt
+```
+```bash
+/usr/bin/openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey private.key -CAcreateserial -out tls.crt
+```
+
+### Create secrets for certificate and keys
+Create the Kubernetes Secrets.
+ +```bash +kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n oracle-database-operator-system +kubectl create secret generic db-ca --from-file="ca.crt" -n oracle-database-operator-system +kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n cdbnamespace +kubectl create secret generic db-ca --from-file="ca.crt" -n cdbnamespace +kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n pdbnamespace +kubectl create secret generic db-ca --from-file="ca.crt" -n pdbnamespace +``` + +```bash +kubectl create secret tls prvkey --key="private.key" --cert=ca.crt -n cdbnamespace +kubectl create secret generic pubkey --from-file=publicKey=public.pem -n cdbnamespace +kubectl create secret generic prvkey --from-file=privateKey="private.key" -n pdbnamespace +``` + +### Create secrets with encrypted password + +In this example, we create the Secrets for each credential (username and password) + +| secret usr | secrets pwd | credential description | +| -----------|-------------|-----------------------------------------------------------| +| **dbuser** |**dbpass** | the administrative user created on the container database | +| **wbuser** |**wbpass** | the user for https authentication | +| **pdbusr** |**pdbpwd** | the administrative user of the pdbs | + + +```bash +echo "[ADMINUSERNAME]" > dbuser.txt +echo "[ADMINUSERNAME PASSWORD]" > dbpass.txt +echo "[WEBUSER]" > wbuser.txt +echo "[WEBUSER PASSWORD]" > wbpass.txt +echo "[PDBUSERNAME]" > pdbusr.txt +echo "[PDBUSERNAME PASSWORD]" > pdbpwd.txt + +## Encrypt the credentials +openssl rsautl -encrypt -pubin -inkey public.pem -in dbuser.txt |base64 > e_dbuser.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in dbpass.txt |base64 > e_dbpass.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in wbuser.txt |base64 > e_wbuser.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in wbpass.txt |base64 > e_wbpass.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in pdbusr.txt 
|base64 > e_pdbusr.txt +openssl rsautl -encrypt -pubin -inkey public.pem -in pdbpwd.txt |base64 > e_pdbpwd.txt + +kubectl create secret generic dbuser --from-file=e_dbuser.txt -n cdbnamespace +kubectl create secret generic dbpass --from-file=e_dbpass.txt -n cdbnamespace +kubectl create secret generic wbuser --from-file=e_wbuser.txt -n cdbnamespace +kubectl create secret generic wbpass --from-file=e_wbpass.txt -n cdbnamespace +kubectl create secret generic wbuser --from-file=e_wbuser.txt -n pdbnamespace +kubectl create secret generic wbpass --from-file=e_wbpass.txt -n pdbnamespace +kubectl create secret generic pdbusr --from-file=e_pdbusr.txt -n pdbnamespace +kubectl create secret generic pdbpwd --from-file=e_pdbpwd.txt -n pdbnamespace + +rm dbuser.txt dbpass.txt wbuser.txt wbpass.txt pdbusr.txt pdbpwd.txt \ + e_dbuser.txt e_dbpass.txt e_wbuser.txt e_wbpass.txt e_pdbusr.txt e_pdbpwd.txt +``` + +### Create lrest pod + +To create the REST pod and monitor its processing, use the `yaml` file [`create_lrest_pod.yaml`](./usecase/create_lrest_pod.yaml) + +Ensure that you update the **lrestImage** with the latest version available on the [Oracle Container Registry (OCR)](https://container-registry.oracle.com/ords/f?p=113:4:104288359787984:::4:P4_REPOSITORY,AI_REPOSITORY,AI_REPOSITORY_NAME,P4_REPOSITORY_NAME,P4_EULA_ID,P4_BUSINESS_AREA_ID:1283,1283,This%20image%20is%20part%20of%20and%20for%20use%20with%20the%20Oracle%20Database%20Operator%20for%20Kubernetes,This%20image%20is%20part%20of%20and%20for%20use%20with%20the%20Oracle%20Database%20Operator%20for%20Kubernetes,1,0&cs=3076h-hg1qX3eJANBcUHBNBCmYWjMvxLkZyTAhDn2e8VR8Gxb_a-I8jZLhf9j6gmnimHwlP_a0OQjX6vjBfSAqQ) + +```bash +--> for amd64 +lrestImage: container-registry.oracle.com/database/operator:lrest-241210-amd64 + +--> for arm64 +lrestImage: container-registry.oracle.com/database/operator:lrest-241210-arm64 +``` + +```bash +kubectl apply -f create_lrest_pod.yaml +``` + +monitor the file processing: + +```bash +kubectl get 
pods -n cdbnamespace --watch +NAME READY STATUS RESTARTS AGE +cdb-dev-lrest-rs-9gvx2 0/1 Pending 0 0s +cdb-dev-lrest-rs-9gvx2 0/1 Pending 0 0s +cdb-dev-lrest-rs-9gvx2 0/1 ContainerCreating 0 0s +cdb-dev-lrest-rs-9gvx2 1/1 Running 0 2s + +kubectl get lrest -n cdbnamespace +NAME CDB NAME DB SERVER DB PORT TNS STRING REPLICAS STATUS MESSAGE +cdb-dev DB12 (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) 1 Ready +``` + +Check the Pod logs: + +```bash +/usr/local/go/bin/kubectl logs -f `/usr/local/go/bin/kubectl get pods -n cdbnamespace|grep lrest|cut -d ' ' -f 1` -n cdbnamespace +``` + +Output example: + +```text +... +... +2024/09/05 12:44:09 wallet file /opt/oracle/lrest/walletfile exists completed +2024/09/05 12:44:09 call: C.ReadWallet +LENCHECK: 7 11 7 8 +2024/09/05 12:44:09 ===== DUMP INFO ==== +00000000 28 44 45 53 43 52 49 50 54 49 4f 4e 3d 28 43 4f |(DESCRIPTION=(CO| +00000010 4e 4e 45 43 54 5f 54 49 4d 45 4f 55 54 3d 39 30 |NNECT_TIMEOUT=90| +00000020 29 28 52 45 54 52 59 5f 43 4f 55 4e 54 3d 33 30 |)(RETRY_COUNT=30| +00000030 29 28 52 45 54 52 59 5f 44 45 4c 41 59 3d 31 30 |)(RETRY_DELAY=10| +00000040 29 28 54 52 41 4e 53 50 4f 52 54 5f 43 4f 4e 4e |)(TRANSPORT_CONN| +00000050 45 43 54 5f 54 49 4d 45 4f 55 54 3d 37 30 29 28 |ECT_TIMEOUT=70)(| +00000060 4c 4f 41 44 5f 42 41 4c 4c 41 4e 43 45 3d 4f 4e |LOAD_BALLANCE=ON| +00000070 29 28 41 44 44 52 45 53 53 3d 28 50 52 4f 54 4f |)(ADDRESS=(PROTO| +00000080 43 4f 4c 3d 54 43 50 29 28 48 4f 53 54 3d 73 63 |COL=TCP)(HOST=sc| +00000090 61 6e 31 32 2e 74 65 73 74 72 61 63 2e 63 6f 6d |an12.testrac.com| +000000a0 29 28 50 4f 52 54 3d 31 35 32 31 29 28 49 50 3d |)(PORT=1521)(IP=| +000000b0 56 34 5f 4f 4e 4c 59 29 29 28 4c 4f 41 44 5f 42 
|V4_ONLY))(LOAD_B| +000000c0 41 4c 4c 41 4e 43 45 3d 4f 4e 29 28 41 44 44 52 |ALLANCE=ON)(ADDR| +000000d0 45 53 53 3d 28 50 52 4f 54 4f 43 4f 4c 3d 54 43 |ESS=(PROTOCOL=TC| +000000e0 50 29 28 48 4f 53 54 3d 73 63 61 6e 33 34 2e 74 |P)(HOST=scan34.t| +000000f0 65 73 74 72 61 63 2e 63 6f 6d 29 28 50 4f 52 54 |estrac.com)(PORT| +00000100 3d 31 35 32 31 29 28 49 50 3d 56 34 5f 4f 4e 4c |=1521)(IP=V4_ONL| +00000110 59 29 29 28 43 4f 4e 4e 45 43 54 5f 44 41 54 41 |Y))(CONNECT_DATA| +00000120 3d 28 53 45 52 56 45 52 3d 44 45 44 49 43 41 54 |=(SERVER=DEDICAT| +00000130 45 44 29 28 53 45 52 56 49 43 45 5f 4e 41 4d 45 |ED)(SERVICE_NAME| +00000140 3d 54 45 53 54 4f 52 44 53 29 29 29 |=TESTORDS)))| +00000000 2f 6f 70 74 2f 6f 72 61 63 6c 65 2f 6c 72 65 73 |/opt/oracle/lres| +00000010 74 2f 77 61 6c 6c 65 74 66 69 6c 65 |t/walletfile| +2024/09/05 12:44:09 Get credential from wallet +7 +8 +2024/09/05 12:44:09 dbuser: restdba webuser :welcome +2024/09/05 12:44:09 Connections Handle +2024/09/05 12:44:09 Working Session Aarry dbhanlde=0x1944120 +2024/09/05 12:44:09 Monitor Session Array dbhanlde=0x1a4af70 +2024/09/05 12:44:09 Open cursors +Parsing sqltext=select inst_id,con_id,open_mode,nvl(restricted,'NONE'),total_size from gv$pdbs where inst_id = SYS_CONTEXT('USERENV','INSTANCE') and name =upper(:b1) +Parsing sqltext=select count(*) from pdb_plug_in_violations where name =:b1 +2024/09/05 12:44:11 Protocol=https +2024/09/05 12:44:11 starting HTTPS/SSL server +2024/09/05 12:44:11 ==== TLS CONFIGURATION === +2024/09/05 12:44:11 srv=0xc000106000 +2024/09/05 12:44:11 cfg=0xc0000a2058 +2024/09/05 12:44:11 mux=0xc0000a2050 +2024/09/05 12:44:11 tls.minversion=771 +2024/09/05 12:44:11 CipherSuites=[49200 49172 157 53] +2024/09/05 12:44:11 cer=/opt/oracle/lrest/certificates/tls.crt +2024/09/05 12:44:11 key=/opt/oracle/lrest/certificates/tls.key +2024/09/05 12:44:11 ========================== +2024/09/05 12:44:11 HTTPS: Listening port=8888 +2024/09/05 12:44:23 call BasicAuth Succeded 
+2024/09/05 12:44:23 HTTP: [1:0] Invalid credential <-- This message can be ignored
+
+```
+
+**lrest Pod creation** - parameters list
+| Name | Description |
+|--------------------------|-------------------------------------------------------------------------------|
+|cdbName | Name of the container database (db) |
+|lrestImage (DO NOT EDIT) | **container-registry.oracle.com/database/lrest-dboper:latest** |
+|dbTnsurl | TNS alias of the container db |
+|deletePdbCascade | Delete all of the PDBs associated with a CDB resource when the CDB resource is dropped using [imperative approach](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/imperative-command/) |
+|cdbAdminUser | Secret: the administrative (admin) user |
+|cdbAdminPwd | Secret: the admin user password |
+|webServerUser | Secret: the HTTPS user |
+|webServerPwd | Secret: the HTTPS user password |
+|cdbTlsCrt | Secret: the `tls.crt ` |
+|cdbPubKey | Secret: the public key |
+|cdbPrvKey | Secret: the private key |
+
+
+
+
+### Create PDB
+
+To create a pluggable database (PDB), apply the yaml file [`create_lrpdb1_resource.yaml`](./usecase/create_lrpdb1_resource.yaml)
+
+```bash
+kubectl apply -f create_lrpdb1_resource.yaml
+```
+Check the status of the resource and the PDB existence on the container db:
+
+```bash
+kubectl get lrpdb -n pdbnamespace
+NAME CONNECT_STRING CDB NAME LRPDB NAME LRPDB STATE LRPDB SIZE STATUS MESSAGE LAST SQLCODE
+lrpdb1 (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=pdbdev))) DB12 pdbdev MOUNTED 2G Ready Success
+```
+
+```bash
+SQL> show pdbs
+
+ CON_ID CON_NAME OPEN MODE RESTRICTED
+---------- ------------------------------ ---------- ----------
+ 2 PDB$SEED READ ONLY NO
+ 3 PDBDEV MOUNTED
+SQL>
+```
+
+Note that after creation, the PDB is not open. You must explicitly open it using a dedicated `yaml` file.
+
+**pdb creation** - parameters list
+
+| Name | Description |
+|-------------------------|-------------------------------------------------------------------------------|
+|cdbResName | REST server resource name |
+|cdbNamespace | Namespace of the REST server |
+|cdbName | Name of the container database |
+|pdbName | Name of the PDB that you want to create |
+|assertiveLrpdbDeletion | Boolean: Turn on the imperative approach on PDB resource deletion |
+|adminpdbUser | Secret: PDB admin user |
+|adminpdbPass | Secret: password of PDB admin user |
+|lrpdbTlsKey | Secret: `tls.key ` |
+|lrpdbTlsCrt | Secret: `tls.crt` |
+|lrpdbTlsCat | Secret: `ca.crt` |
+|webServerUser | Secret: the HTTPS user |
+|webServerPwd | Secret: the HTTPS user password |
+|cdbPrvKey | Secret: private key |
+|cdbPubKey | Secret: public key |
+|pdbconfigmap | kubernetes config map that contains the PDB initialization (init) parameters |
+
+> NOTE: **assertiveLrpdbDeletion** must be specified for the following PDB actions **CLONE** **CREATE** **PLUG** **MAP**.
+
+🔥 **assertiveLrpdbDeletion** drops the pluggable database using the **INCLUDE DATAFILES** option
+
+All of the parameters **adminpdbUser** **adminpdbPass** **lrpdbTlsKey** **lrpdbTlsCrt** **lrpdbTlsCat** **webServerUser** **webServerPwd** **cdbPrvKey** **cdbPubKey** must be specified in all PDB lifecycle management `yaml` files. To simplify presentation of requirements, we will not include them in the subsequent tables.
+
+
+#### pdb config map
+
+By using **pdbconfigmap** it is possible to specify a kubernetes `configmap` with init PDB parameters. The config map payload has the following format:
+
+
+```
+<parameter1>;<value1>;<scope>
+<parameter2>;<value2>;<scope>
+<parameter3>;<value3>;<scope>
+....
+....
+<parameterN>;<valueN>;<scope>
+```
+
+Example of `configmap` creation:
+
+```bash
+cat <<EOF > parameters.txt
+session_cached_cursors;100;spfile
+open_cursors;100;spfile
+db_file_multiblock_read_count;16;spfile
+EOF
+
+kubectl create configmap config-map-pdb -n pdbnamespace --from-file=./parameters.txt
+
+kubectl describe configmap config-map-pdb -n pdbnamespace
+Name: config-map-pdb
+Namespace: pdbnamespace
+Labels: <none>
+Annotations: <none>
+
+Data
+====
+parameters.txt:
+----
+session_cached_cursors;100;spfile
+open_cursors;100;spfile
+db_file_multiblock_read_count;16;spfile
+test_invalid_parameter;16;spfile
+```
+
+- If specified, the `configmap` is applied during PDB **cloning**, **opening** and **plugging**
+- The `configmap` is not monitored by the reconciliation loop; this feature will be available in future releases. This means that if someone decides to manually alter an init parameter, then the operator does not take any actions to synchronize PDB configuration with the `configmap`.
+- **Alter system parameter feature** will be available in future releases.
+- An application error with the `configmap` (for whatever reason) does not stop processes from completing. A warning with the associated SQL code is reported in the log file.
+
+
+### Open PDB
+
+To open the PDB, use the file [`open_lrpdb1_resource.yaml`](./usecase/open_lrpdb1_resource.yaml):
+
+```bash
+kubectl apply -f open_lrpdb1_resource.yaml
+```
+
+ **pdb opening** - parameters list
+
+| Name | Description/Value |
+|-------------------------|-------------------------------------------------------------------------------|
+|cdbResName | REST server resource name |
+|cdbNamespace | Namespace of the REST server |
+|cdbName | Name of the container database (CDB) |
+|pdbName | Name of the pluggable database (PDB) that you want to open |
+|action | Use **Modify** to open the PDB |
+|pdbState | Use **OPEN** to open the PDB |
+|modifyOption | Use **READ WRITE** to open the PDB |
+
+### Close PDB
+
+To close the PDB, use the file [`close_lrpdb1_resource.yaml`](./usecase/close_lrpdb1_resource.yaml):
+
+```bash
+kubectl apply -f close_lrpdb1_resource.yaml
+```
+**pdb closing** - parameters list
+| Name | Description/Value |
+|-------------------------|-------------------------------------------------------------------------------|
+|cdbResName | REST server resource name |
+|cdbNamespace | Namespace of the REST server |
+|cdbName | Name of the container database (CDB) |
+|pdbName | Name of the pluggable database (PDB) that you want to close |
+|action | Use **Modify** to close the PDB |
+|pdbState | Use **CLOSE** to close the PDB |
+|modifyOption | Use **IMMEDIATE** to close the PDB |
+
+### Clone PDB
+
+To clone the PDB, use the file [`clone_lrpdb1_resource.yaml`](./usecase/clone_lrpdb1_resource.yaml):
+
+```bash
+kubectl apply -f clone_lrpdb1_resource.yaml
+```
+**pdb cloning** - parameters list
+| Name | Description/Value |
+|-------------------------|-------------------------------------------------------------------------------|
+|cdbResName | REST server resource name |
+|cdbNamespace | Namespace of the REST server |
+|cdbName | Name of the container database (CDB) |
+|pdbName | The name of the new pluggable database (PDB) |
+|srcPdbName | The name of the source PDB |
+|fileNameConversions | File name convert pattern **("path1","path2")** or **NONE** |
+|totalSize | Set **unlimited** for cloning |
+|tempSize | Set **unlimited** for cloning |
+|pdbconfigmap | kubernetes `configmap` which contains the PDB init parameters |
+|action | Use **clone** to clone the PDB |
+
+### Unplug PDB
+
+To unplug the PDB, use the file [`unplug_lrpdb1_resource.yaml`](./usecase/unplug_lrpdb1_resource.yaml):
+
+**pdb unplugging**
+| Name | Description/Value |
+|-------------------------|-------------------------------------------------------------------------------|
+|cdbResName | REST server resource name |
+|cdbNamespace | Namespace of the REST server |
+|cdbName | Name of the container database (CDB) |
+|pdbName | Name of the pluggable database (PDB) |
+### Plug PDB
+
+To plug in the PDB, use the file [`plug_lrpdb1_resource.yaml`](./usecase/plug_lrpdb1_resource.yaml). In this example, we plug in the PDB that was unplugged in the previous step:
+
+**pdb plugging**
+| Name | Description/Value |
+|-------------------------|-------------------------------------------------------------------------------|
+|cdbResName | REST server resource name |
+|cdbNamespace | Namespace of the REST server |
+|cdbName | Name of the container database (CDB) |
+|pdbName | Name of the pluggable database (PDB) |
+|**xmlFileName** | Path of the XML file |
+|action | **plug** |
+|fileNameConversions | File name convert pattern **("path1","path2")** or **NONE** |
+|sourceFileNameConversion | See parameter [SOURCE_FILE_NAME_CONVERT](https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/CREATE-PLUGGABLE-DATABASE.html#GUID-F2DBA8DD-EEA8-4BB7-A07F-78DC04DB1FFC__CCHEJFID) documentation |
+|pdbconfigmap | Kubernetes `configmap` that contains the PDB init parameters |
+
+### Delete PDB
+
+To delete the PDB, use the file [`delete_lrpdb1_resource.yaml`](./usecase/delete_lrpdb1_resource.yaml)
+
+**pdb deletion**
+
+| Name | 
Dcription/Value | +|-------------------------|-------------------------------------------------------------------------------| +|cdbResName | REST server resource name | +|cdbNamespace | Namespace of the REST server | +|cdbName | Name of the container database (CDB) | +|action | **Delete** | +|dropAction | **INCLUDING** - Including datafiles or **NONE** | + + +### Map PDB + +If you need to create a CRD for an existing PDB, then you can use the map option by applying the file [`map_lrpdb1_resource.yaml`](./usecase/map_lrpdb1_resource.yaml) + + + + diff --git a/docs/multitenant/lrest-based/images/Generalschema2.jpg b/docs/multitenant/lrest-based/images/Generalschema2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e7c20c045cf40599983304a17c99bd187e9ebec GIT binary patch literal 96239 zcmd421yq~S@-G_PN?RyW+$B(;xR>G(Jh-=bfIxAVQlz*Aw<5u%NN`GV2=2v;7N>ae zOV2s?f6w{fx7L%j-dk_-$$a15duCfEvwwT?@Z;ep;5kT6Q4WBD0sx>qegF@vq~Byc zAyxo@q9Pjr8vp>{0`LH+fTxdK=;H}M!TFmFppEv({)-Ps1po;D#s5ts{}-?D$W#0q z&-yR^F*|?<1rD%k{*6#k z(NO;qIvN@(8U{KB1_nAhItC^dCI$u;COSGM4ki}%U&6q|!NtMG{rm9nDCDn%zoi}z z?8j37$nt;d^6(u%fDL#L2t-4n2cQz5pb?-vbOWd#n}LGyDEpsH_;+L00j*V4Fw$?2MZhTZ?4C_eylD50E>_g`vpBW5izd@1CQi;Cui5{T0XFrOMFrd z3BQ!|$DrJCQbtWPw}iyJx{is-M_E*lS{_^f@47It&@oWZuyIfyllcf9Wk2=>8YbrB zQ2iwoR06=GGD13TNldI48rVEe^zT2$=XT%_OI45mCL&>Ym@;ed@jf(@7qxC){TDsob;p5#Q#Wp=wUM&?3D= z<+9&KPE@M7O4vtI33WwSf$OmZS0{Y+7Xd%0#^zh-+~G6lxdy4sO-v~zk*Ny`QWJ6D zDwQOTLw(%)+Rs>kr_D?JMY|OG@;R=`=dPtK!x4TW#W@BZhIE@uQV~nK@J_C6(x=y@r6jhs-|6uFH#!nFj-IiliK*iv^YJkz z{)2SWvepuJBomk3j|usib1@wI{EaOL^wgJ0d@3c$3_BxLv={#-Wulqc4KL)`giC33a<_^?!GV6>Ytv($QOFJJ^F z&~-@&8&18Rp52-szrZsX9Ho06}aup1Ix5PWpq;SLV42WJ~hd-b6#p> zsqs@u#&eQrUcEucuhT3YgZj7|zSV5g#92ixrUL;7bV>MZ8Eb@2uhqEn8E=aQ+9sg>x;Sa%d+U{|m z_w-8^0B2blOt>)JiJmYH6Q~?UVpv8&F9ZK_Ld%=+B+8 zrwz8AS&HxH*ctJ)eQhifWgaUQVwiyK*6xw&SB*GOijEh{^oRcGDgM4 zd#-ne&!i$IP7yTb+plM>cvMV#n1F{C2ltHV5Lp02imV$&307`e$-|c=hR@pnARY%2 z*rvq)vc+sTNjZA=kHZv=m8&pQ@=S1v4fKq|2!zzkWXvwGorxw=Ox$=A&1Rfy#jzJN 
zTu5!)#m%hDAdVLT<$E?kXhgFj@;=1B7(mlsmUCY&@c{7SpvpBVyG)KA$>7|Ou}VJY zKPRR4^f|<}&y*N*u@0M9^Sl1uRFH;?%D70yjng#1%NJ0B{;W(Yq~&ZdqCq|n3~h1N zK3Of{XNEge>ztb-=Iw0sygt5;3zcVislO#fwYr#q)6xR z5ZfZzeJC@Omd0L>Plr;!_r(|;$ADrDXTKoh42tGdk!>^x9H>i3_>-wODZXzNH-50w z^D1)8O0Kxa%$zcXs^eHEo9(3#SSxYki=-k}e-J-IKGg4%rLL`8i;u(p4QLa#;>Ke{ zu1Qd!XkV3v=TsL|m-MG~L_5LzmxjJWwNj;tosNt^j;(D%d?wIo3=t^4DJ^sby7!a$ z(>baDNi>xkQrkI+^T1C*zM|GT_H#ud7T950-{Z8m(0PMY?fd-5sg_`=w@X!ZQ6VWL zU3kGbr$Ec;n#>1)TixD@j>#;yX8N!hQ?~7@Bv|xEMac*V5LDZp87H|ni*CzXf4*lm zUc0SRem$jO97gHw!-}hKCN%{YO6;a#!7I52`}LV*SUXjO6IXOW`q*EChmGsU_Dfy% z5wE?`Wj*3&3Rbr|G$akYF><@FzU)ye&;h6n=*|bFiYmaj1<7Z=xz7(=^c$E=bfR&m zt)hl`IQZu(KP$t(He`9UDC$1vks{NhKD52uQ3FlrtUa$wA&7a$UdqG6^F}!)|BbiH zjTx?rcdeDl6z(T}1sljN<62aXyib~2+?Nk>K24BojaYG3QMVewI;GN#fQ6Yi6At`e zAk!~3sZ%tFyfdCErKfR`2L}u6r0Q`5l$99Ix-OI|Y&}y;Fk0F$y~*LNf+bC_CHpq< z6i&hwz==ZWN|LIyasa&MH2rV-q%Gx?if@7@QZkU^kHHlI zo$fFQQO_Gbmo+|K=HGTKm6|Wq{!`(UxLo113^w@CZ>dKo=2Ch+`txK^t3_%$K+3|1 zF+qD=gJ)NYq?Q5Ov_4KFtQ`C0=a()7HKpql9T-ZDDWNyI(X+_026164c+hTaggBV3T$gsfHK5R)u*0sPuk&J&NuitnDar%` zYRx#!C z+^Of-Co|@9jdbg<;-GcCUm=r{mHP+%&Cx?sY3`Z9%4 zF78HDFf1Iyo%)Xv(T$Y@a%DlqVc}!_G*H`{;hcX+m+3NVGV0qrVG?7mqG@EYFgiQk zHY^nx^Cvl3m*V3QS|AZ@y;5=~3Sv;Aeqj}>R$}6gQKa`#J#JmW;9#C#5tyK~BQ+39 z_6<*5JLv=Clgy|W6<7rGRyrDa$p(Qt3E*q%VhQtlbKm-bCJb%1%6Au201b-zutlGJ zXJIPWy902${Q_vr`DPIh=ca>q(b=VXo0E>1wNxs|Oe%T@N6)vag{}u^j$;Dm9pK@} zo;&EP7;(64gR4Whb-89#B)wrr!d!u(HFYT+N*w3{&y(IOfl>Pv0PMc=Gh0K3mv+vq zGm8p>-OH9QtR^?essKD+82c6VUfzFJrx9p~spTNTEy_)iLkKCdT?*gee`E1&4jAna zu-K7=RR?GNzMyW&9zwI)9ctffy*Tu?jBVD@4)YY`$8)!oN@jUouPNf>l>4$Ds*;47 z$)2_zwW>dx-eR&OiW)+H-gJ3syQ_mB$vy`)f3k-O(}hYcd44ra`)wf*M2M}=QgSud zXH(~~WH<^gscg+p0xHuc(PYNY>QFXtTpAjxj%^TwH%?vRr3fX6BYM~5MPbSJ%ve1ZDyI-sqTcr@G~zJ zp&Q0JuP>BNP91}|7l-@j(})J`cJh;0;nzQ4*CpqARU%WkahiCJy@X$azOhLFE;-qb z8IlRx+;lB`*e|^Im+vN7%-__m)3tx><<>RfOWpST9H(8i`lgYSW@Vjmi4hF?H`>{uzOlDW94<31^< z5Q?_qP8xj{(#&uScBifFL)~+|_{FUvy_-Ge zkV^={BQ$=ORKE?Dg3TxBM14%2hOxS;tMBBsp)_Yvmqlq3RR>F 
z8hD2BGn1rW{S=+Ld0%B_!qTlb&a2=VQU5j7gs@1Xc?2IrAY4pAO)i6FrnvK#a~0TKK(eM`kj5#8yc_GT>)7LZR#Vrf z^?El%Yl>9k3gw4S*rgDk&3MU>9qo9&TwLs{En+sx#(prkh};0>&0ARf1E7s@w*E(l zraId!bl4fpBK^79eB7obku!x{TDAEn@`sdqT=fptDkkx`xEj&A{~`alT$ws`?nK$$ z)Gu3twc?r(R(3+d6bcqf1u?CpCs9#RJD{b=Dkuk1vBrbN?V`#qMIDl+T;aB%<AF zY^ZL?4u(nLMKd7?#wC^H&+3A?GhE(FIJVnN+8|iik~cX9R7-Y4X6X^UtMWZZ*^&kc z#2ZBI*g{mV`TBOK%o@~ME0R{+;luB>iX)(NPHZy|05U~=sTd_fa9$<`o#O8hFM`7R z4EKd;{qUlWlA5z=I#s5cBqk6@VSqAlMEKc40lj>_Tr?f#*K?Y|?SM*`-znw2|BEkR zII#X9ePa>Oty)^~WSp(t(&Nw5lJXnu0Q=ZK;nUjH=}fx&l@9=v34(a*IsS9`rLy$; z(Hgtm*cM&BU~)d2iRU)s70T@v}i$#W@^^>W3p~m*kqb z{4v58lj6LwT;ePlv2)*I-Da7MNwWg#h)|2L9e7_X7zu$4!`G52@I?qwKjV-wbbC|o zLgz+(t3Y=KAvVTew~rLE=nLSV-wAIU8T-x56fJato(jOSU*am7JoPNo8yWXm^61KN zo4eS+B{cddfJ<)XOc<-yRh?z{@sei@L6*FT`rQ}%R){N1vyKI+Dd6rw3QBxYGR~S{E&?zL?CBDP97Npxl zb~2LDTrWEZd6Mbj;%|CMP+?csQY~^(@6ZgFqu^=-xx%@=B2#6yZsLA|)oT>qKv={S zclnC7L}d>-RCnt*vE9n3Q$Us_pbVu%#ki)ojgx*z^3Kk1TwFYs^(P?$<%f()#U+gx}!tOm)U6F+tD^ z?#OxzuCqYtNd+7tlQf(A z1ni)l^|t%xoQ&@P@3?bTif$1$nghyBkmM`?+9m}f{1f&MGV0@UahXr*xj#Ss{ySqx z&puw8o3^W6+1A4aCJz)iJN$Z2yV@#V)uSfJD9b1A@m;o8g|kD0Qj7xZ7^EG z)_6y}@>8Tkp){?KUA0B}eciW(QuSgc8T4Y-pH5`r+*MGk3T0#J;(4~$M73mN058VG z?`NkI1a$vpm}dW*VaivKF;LrldI!}T&sUKAhZ&3}|CiaaE%0@{V|cWTgi}|~@=aZB zCI}wp{sP1QKSGyL!_l<=16`(h#Mb{Cyo`;5g@THKiHnAg_lWs=JqG|V9`W!;{2L1o zALq#ke~;u)Wul3Rmlz-3~V`IJ~)TgNY;W#I-($}cGDoMe$zaPR0Im(#I!omdSH4J+)L z{{5($`Y#TR^&iML%3ov~3mX;ZFY>MS2uD96^k|s4Sm@a3|A3_b1$`4@(Q&_^=aJNK z!X}ae|3##$$B7wu&73=a2jxwyl8`d;Nhj1CUrD5z z3Pch^OJ(<2nwpw6P~)t^N1S;550+t8Q$@>rxQUVG@3B{Hc4E#linr$EpNNI0+{wAf z<&Arrfn7CUm8n1z?XYQn)>nd0)Sz(ph>{;YXQeDXAf%F|@`@#SOB1p3QN!=aTY&9k zZL|;M54w}7`mh$i5?Ulr(sz*evuO$*c)-WC?Tp*5W3F^((Nf(3A!v=MEE6Z=!t4XU zTJif=rMe6yUFJ63rWmb@d~Lh}A=S29vI8x(hy<69cD=CGZr@$CE>$uAkHka+R|A`G zKtFn)yPY2N(`17YxpHT)Gr{SZ*9MEUDL;24;Gl#M9BM(nC|K% zDR&^U!Fk^I`UwlMWBwhLyEkVK0Kn&>3-qLmL zPTta!X@_c`+u|{5Uar4;0DO6tOJV8k&5QVrkf@iH3HV*h$~igr0H{m6l!%J_AhvhU zF5&Y>F~0ZPpQYyPT5$V!x;gD3nSC`LMUJ~=;`sJ)<@|F(YhgN7?2R(*fX>~8l?Q;% 
zDz}JuVfD(DzJ$x4QLgW&=b+K$dqNXGv@K{Wq$>=CP|4kTW-7PL)hyikj{2?p?A4N+ z;8NHZk0TKI1Q!APT!ud=IrcuM3%$)EtOex-;T5fx8lUN59X~U_L}ocB=QRZ zr^J*_#MMyhvCTn)hGYI=HQPKo6;6^WvyJLsi^c62jQSorh*eFUPdm#+kKW$v4yYS( z$aD$o#Z7br5qCW-5yDPlo3Wj1oFsBg=FaBK?3SmNyU742BhQ-7sMJ<)T;}#1vmknz zyrR4c&0Ce%)RJ}Hr!m?O8voGF9q>d5=aU;`#!IH9Wm(4CP=6#)Tg(cPkWIkO;m|% z3X_yNoHu{jTI<>1QJ*Z=IV`DK2hzbbd1EVVGBs<{q^ik$-M8@UT@=LRj~ZGI()Pgoe9zcWbG!!*i<2nt0B#97IWfIH~T?&rE(iLHMdA zzbz9HoHtfMQk+DIj*^qsLfK7C-z|K-^*maaJcA2NASz_9ak1%@^UO8A1@~uOQ63x` z4KD)E)NKK5<2*nAV@Bf(O=PD}1meFHn%w!~R5U3`8AuN08ka1djAq}iU=u5)KU5E$ zrYLaE3HQEI_UwC6P`5^_NfI~S*{nlH_FZt`Xk83gBj zq6C=pg}J!34H&8s6ib{(1m8;Jh*~RmZYq?>Jn{TCpxM!@(5&vL1ZGLy7Wh1@z~dQ*}iUNN2vKjf42wF%>DD z#lGF=g9eXe-G28sT(xnlqsfhy8GU@x2bE0`X#r8R!6~3*n8~!(S7cg-<{`ucrzCnG zqUhOkA36}=^_QpCQm&4Of5cey5UV&3pu>9KfqfT(LJ*5UnFG$-&QJH z9j(E@-&S$GWOtu1IR(fuaa=nfM>R{j;W#)5Xl_CxqASxiX(1vcP-wL z>#XPWS-xsGZ?e)?KR&c9WZHCXOHAjEun4l!@K7}>0ced&@=c0_s`mmx23pfpvFA#B z2X_wqcejZrIosQ)-vPRhK0`8O;d;^T=89!gVtnilJS-Yu7#$G6cw2NW0{99begIs< z)7|eI$nWnJuhh<5CA4Z)S7z2tR|X`2_c4L502l%HC01GO6}|243I3XQRL`bgO*Wi2 z&R(9VAp`Qi-~muf9{{ELMUHz;RYw~+k6@R)yitpYmTUfdLsIc0-$(7WuZle{c_s-@ znoS0VU-s}w^0KxRacTv!*?dWdWJ$A{6TvyyYy)3K{dZl23&fV9FNDm7?9!d=Hy!M) zJr|BIWt&)Z$^=iM3}<#4?bqX~HTA`}%i}Lt!NtZ4%HWLfw`8nul_M`T&OH7E4M}9` zZL3wI@Ng~3kFTuZy}oxc|AKz=UxS1e4cw`Ed=i=*@2dj<) zSjLsZ(ld{=N@Iz+^s@>0c)&ba5OVjzZ9Amy*frBAxvZ~bx|&#pli4KN;%Rt97M{so zkCxyNNzlGG)0sJUt_q>~6P0)tI2Yv&hb$Lm)|_~O^aG%lSyJ;J$_}V=DgYO!lkpj-z6m=r#yse z``dCO;TO8Q)EE8}RZT8xPV0Tdnwe9kx`SSkDqa?tS`Pr<<5z>}*UC0ca!QF00HZwS zdz+~=jau7T6Y59AT&D{c6H8t(Zbe^SPuI4d;^Z zdkSH>#8p0+)hoB(tqq$w?3ALC=K;`8xHBCKeHq5NE$L$IYTOjRE(4+-i_wOnZ|+VWo#Vf*sWy-uXs8 ze^l*yq1)TJsfPLOKrLi}B%9YoB!fi_uCTi1_+oD-?`NSbQJ#env00v3ak}EA($L|C za?8YfEc{4U72%>GcK#CIo?-#Z_&xzUNd2)w`LNK&N~`m|qFI_iF+x)7D)dKQW)~4$ zt*72VpUdu^K?2!kTJK}9E349fDvE<$cyX%zCj1W)Px=Y>V7n{n4|+K^;VE@6Mk5Z?KMZUT`%XIaB3Q#!O5oK zf#U#=po;=ecrz z;%iS$2T%l*bK)|>l;m(9yyPX%->kQPe^N9Q8x7vfKAFTm&n;QhZ`%9wO!8tm%a2mX zkN$}nQ<~y?_I8flE+st*)!1CIFIT58Au}mkjjJ 
ziiHPy#`D2mpPcEJo=*rQC!9!6jUkR^PPhpZpn8?EMAkrVK%{eVM^F!)$_U$@GcwPo zRd#7ji1YTS)LQV-%Wm(KJe@4e+G`)b9qcX*2e<7Ony%7dP**d((dd%8%ZGW*`c2qb zGDQDE_&pmBUbfD67FaVN>ot_qp--^ugVSxCTV)t(JC1jn^Wn-6E-R`Cg^KZj*Ma421fdY)zKZdHMkHhSge1xo<{I$to~{ z4IFWv48fDQBHd5~&8xC)UIg%5CO30zE!u4PQ?UiuJEsNw3B%lDF}KpX^YPU$e*iRt zzL=gxv94Y30H(zAVSJG>b{Ff0@+^-i zQY{8om+ERjvpe`N?OxI8>VZRwAyb7Yr3zUH*_rDo=jKH^Dz)O8;}e2SJ||Jbm)MzA3b0 zwW&0PFK2w}3r+L-13(}=gn7$9tMLhquv5M(Btd^O3#+&+5^H4OYqKy?u17|;F44>R z+?MC4FvFU?)XJv?Zfoum-2!ngEVwp?zivB9bJ`M`^nLn1yP>9Dut(>*bfqW4+PSfX z9zr{}nt6_eVH?COqIi5|R;+m{$g=uEBEMm2#bEd`d>|m|Bn9-U#_06c^iIiCK%G7I zCjOm8nxn_v-fsIvKD_|RVgC`ZmzP-B(qOVh(JS<^Q1tZm_cEi=3E8q?u{Auan?Zx8 z-Qx|z!!QnebI4m+YpntbkVuMX@YP{~(i3nk8yn+C_cSI!Vsi>7FTefc{9!Lsk)Toi zS*5fVHK$ZGp&w(_q!a~oT*;UA(m#G}X_dUJ8h2POZUkmzM6fYxLb;WVdk9TK4h@7A z*+{vhcu(cIS~I9da&$ub4yJFz^R!N7;%`jM%hZ#O)ju~SmiP}0S)Dx^&_5?@34#Xt5EONqJ1H7)t#~wG}iI38p49ZT&2N_JjHYvi6B8a+P3%g{Q++T8_ zmKe;^TTIEEv-E{e&~j%bIf#p88n;kJb+i=vS1Y%4-4)?FsITRH-y*I9DTJU!#rY*F3jA}5~kJZG0A4#={~@E}`wBEK)wogMYm<`L9rA;N6S z&}6#a{xfzNc(8#MN}(UbBnh)O1*7bp6OvV-z7 zCl#=+rbhi8fSCjr=Px2C21w8k$GHa`80UQ|L=p`Yr-v+R5-roG=CMeObLNn%DVpe2 zE@w?z$+-KwL`IS*H6o}*buaoLeQ5$mH*HH*1bS@~mp*X9t*|2M`41G3-c~yUaGr}X zqaR^O?onndOn)Xb|w>ITt^fRWhoP9(PpZ<@=>tW#49jM zQZ|ogc?uEQ@ci?RGo&e!6ygcBW14v(hzhGTAZ>jms!HD7C6cM?%sdpAg>d%ec)>sT ze(A|=mh*z>EvEbTUO}Q~3xn?AF#CpQ3EHZ}WK8__&Sxs}o?Qr6&PiSm_jAH-%uG*k zrkyH-hK~ND+xtk^C=>cOR8FIp-{0k26xoH}(_$%F@4NL$te^x1%x6^sCgHJ%dS}W0 zv^=C+d#mqI+pqQRH)9p8*U;$v9lrf5*!-`g|1E|60r2l-e_{mUJ^;2eZ<58y13o|T zxoYm#1619Ik2aZ)7ySwBYtJZuO9G%}mc1#y7QKCRxJF*Nvit~4BR$ODK03xo4G^jx zdxWLGvcHYzbED{b-P!q`~zJn@v`W_dJCsM0@~&pM*kn0rS_cq>7tZc}Y>C%HdR zC+SSrH(Z6;Y__LK=rqLmyUQ_D+mwqu@J zxqftS=4i~~;73Gt`{(#ep%tFGAHRLT;vY(5q-ZF`V{fd2aN=%1!FBIwD{bvGZ#OHBkSM&pwN zKVMqTa*3GxzFZDr zxu`zR!}-SEgbk~5@`V-l^5Uc1(Za&1DcwYSM)5&e^-5J z|3y9lFRJa$XRkT9BwJX@;vcbZA>Q4%5Y`s&!H^@VADsLs9>(Fg=Taa~MIO?;-^O7Z zM8|V&9?M{ke3^ZTB-be24cO`tyr`z<;7p+sr_FoiMrFR$MyEJa#}@0T0B#CgI=k}b zmREkRG%GEl-SjO1wg_Utub3D?BaDpslfJ+-MQIx8;=sh 
zCJAIAh0~nuH^;+6y`=-SfT7kwo|{@8@djdV^FicPyZx`7)6_w!e)_A7PM-nJ@Tj!G zKOozfNBdv5+MF5S>Ge@f)g4~sqG0g~-`V8a82NN6@6ft>Z&yx7)D0U^v9?&mjw!#IB2T3w8FmHu)ttBLs9nAPf!=UO~E&heEt zh3zidq)Y$++u#2f{Rg3-y>DI`pQuxjF-?+cP_&qI(yA{8r?eU`-RmK%DLZ}OI)acTI0UhkFzFtV;Cdiy?OudCs!fZu!fm@5%i zC7*-iD#8$s?8r8G%M%;I@fl3n>H{N>VD72bH@93KLkknUq+q0=#q>2>z3tGjp%;+2 zDMUr;zR`V^r_iFMtI9x}Sfs2oSv76tXJ%Q*1Tx)cfOI`dWO>(x{+`E?YBZzm#hXn& z&??qWTn|u3rV`hrJSWn9>JC#2acyTX1?--N`}p{v`=@4v>F#YwLdUE2S~k>YJjdHX zm5z2rtxay5;^a$d6TQRsCt51mmIZ4SqRw>KVXq=7=VgV?3us>DReESsn@vG_6PGHTyHic{1@sjBww5G(@_KvNO?&KQ zZ**U^CU3EYaz}w@O&oTFXZYJ-h&ck=y#Rj6%{@~wkh!u%gxE`=4+w+EOo(IiQSyaD^1A-p+U9%6EvL> zngtaTMPh4ssBevradAoFFW@p;=)w<^gDcoM{zS!O%du}86XN`JlXlGL()BK;Bb+X>t|Y@L%ABZr zMe+Lmj_UWX>DH6ReZ?HCh>3b0?MzEf1y2{*b1CqI zY4bo}om_N7T+xn&!%D^et)6#(_9V%R$w3NLoT(@Qaz`&CFI)!@H>qvEc;p6a4F}R1 z8&)w#Z;;yB<)93w^6%Lpn4WP&>I4gNtC3CgHk zL;3}kk?|JZd8LIpK5Y%nxi;W*H+fgD6|Q)RlxXMmmM#D0Gp)UPQKbeN+3B|Qv);-4 z@v|;iTr-h1v;9Xh+eR2)LN!j^!p1qtVqP@zH}ZC_!MOpt(4et|muAPdI&9UoE_z91 zzQ5qK*V_#fmy!4kH_RwT)8}%+kaM zQ+qhq3eBc_))n^Shest;Z|`J9lZmaobyZDbs`OoJt|^g6HM7#^_t9N3kpxBCCH15U zR-SuV%w;X$c%1ZCZKk$8Y-fuOPVrYFhmXJgbQH^>z9Sc%8Kb3JjN?0GhWx$)wLq?& z(N6^hiCKB!v^IxoXaTJy$r9ymCe}WVUObo8Bsu4T6Zj?e`K3`f1r_$2Pc}N2d*G>3 zSN)z~&XE8M(@@Wd@i8Q~{wL=~JKC`rk2JZKMCHZqX<+?fP8QjG`;?;s|3Lyji#VxZ z@L4+!9?kpSAC7Aa#8X*^@u0e!^UO3}5jGA|!9)$97Mrbeiu%AQo=V*WTp545q;=); za!bXUfo(J4=mFs9E=N`;!hEvpo#2w-{Hl_u*@@y#$s!ZMfookwqJLNYLBIQxVtfKG zxX{{YX>sE!^P6HzDXU}R^GdZZE;>=%%;r6=-_FJ}-{}nN-5=JGnexUoj-KciF{7Pj<|mM`6|88#{01yYR=Ih5iQ%6vsTu^oUQno^;c9w8riIL& z)ao1;(z+FL#}}H)j<|RLj4_USrX9FCY$uxKnNyVr<){)}0{{ld^Fh7#cY>BIa#Z#xPi z_=nPRqMrd&px><=Mt-)7Dbq*J()t$`(!>RpmKIsWEqWIvBt;^FZ2r3t7@WJ*9H-!g z*HD}Hgm_{a)?r5VyCq(>nnCLD!f-s5UhZg3hBD+Fv?y5x)jw&$i83 zXY2&0=}UL5;QYAxKKyw-Ja(jYJ$!>fY1IL11I;U>}|FeTqUY4Ot>&I)oYg zchPwX0;kNqJnua!+FcjOCj6dRnI16`n{mn=L)YoKhq*8Uz4gO(w-x(fJxMrfl(moO z{<9)3fykaggTBaLujIH*Inn6L3&aVxl@r%=sfpyg_|2nj-j!e1@)s|MbQ;LS;HQ=YLenvtwOV3g`6sY 
z2D)RwL*0S^_1TzW1b&f<;PzGra;SRIuZ)RF)WtGy9x0~k%v(hZuF(N@L|9x@rhQXi zCd_DQfq03vPpH6-m!UoL2K9A=?~T$EQ~L0oDO+ES=viZs43#!&G$ficCR>?5c0|5g!2~kc>tGFktUPNnp#Ygsc%#RHkj}i zv9Ksic-EpeXZe~XX31M#yNW@KA24Pv?TbslEGOf|p(e zIQJFQ9;3vy3z|NahtnKdokjZ7JFX00e!;;#Rc|B|9^rw|Wqv&+6)g8YsW($HDo^B} z?;+WAEi3L!Q{^I1K}#}cjaf_LCF-qQ{(aRjWs+k&=Q+qIIMN916>npllh&UsS=^7N zaPJr8j+ibmYz7W`HuusY(`_C@?jiNb`N&JzTk(UU{fwvz6=-w)3AMo#a~8DtPSg@N zFW+W6wf^Fo-;^dVB9zBIlc7nlwfo{q+Bt&N!4fy?wSSh2ct#puz?l5_v>9I;BY#df z0?k6zVgbj{?Z%^?$g*|!?Fr{bR4J1RI>?}CkURDybM0Vhc^<1=Nz~+;tUrxBHR$_H zXkt}Vy>(F(yy{u}`(>r+)hA|I?O?@@?WU(b7_@{HJoS6b|{9L6g#wlR%6X841Uh&MK5Us0kN*Kv(yABub$t|HYt<6dz!qrO)&Uzjf z?Pn@xE9#>uQ7Iv*$gj#dGA%N_^c9N@`r}%%7Q@m+1F3LsWfRPwVsk$Pi{u5Kculx` zYzFgP{9@lD{5>jYv`zt4!k9u#!wxPh6ZudotV5IwMfl{C(`99m;@UD(>jTGl&!&OGD7$uM(%?l6CMMiMlCJ8#=UBxA1YKvjJaLCm!?7; z1`UozuZxX`5pa5yoj$c+;@I_|lqe2P!={pwwT5-d6nkgPqL9-GGINr4=D9a(>(9-_ z_}{H*{USn`EtWYAHnQ(r&9T@iC@ne)eb@u@a6YkqI0QiUbrselUj0XQP0U)o>_dvIgw~^%d-NEL@vS`3X~NS3OMSr*2Aw(*_Dlx=#zK|< z0}BA)P9*JD2#ow#I03$h1=f}Q56aFvpoyjL z_wm>fRHS#68me>wfg_zjXrV|~LXj4F=x{6`9f1%?Xi^e7NH3u%5IRUl5(3hj^xpL5 zJkNdaeb4jWfA0RVncZwAvpc&p^ZU)`_gy;*`#qOlDC7`_7z6sjcm;`ug0o-PxWB!` zmR=~9okT5sP3y>Qr!w6fMx-d61NT+yHN_?!N{{`ubAI*65(H^ zGn^OpC%b6nR#Ea80cC1att47$p{Fogr!6yMCi$A_3EXsz?uUncJW6%k=P#r;Q<4RX z%{ifgwX-cHnzfpmD*YWePHZ9=Aw16?)w2uibqZKj82wr7JYn(5)wS&#kK>Z0uT1p! 
z#dT}XL;mc6$!U3rTS{VqZsIZ9a%L3ZU+e-wHGS+KSN%Tk^~rEXyxo~=DtX*Xh7R(w znCTJacO@CK7#bbi+S_bSuT6Yt{KPpF=fVDSmfw5$5k%nqKsCmcj?W;qh!z(}1XmNI zEUL^r48lb2a{T2vNI!x+fm)#@se>sZ6LXIEMQG>4FN)xWbeFh+g9FF};ExL}av2C# z1p1m%wUINh86k}V zG#{PPlPlbd)t=SeFHUaVfR*`mu5Vp-*g@$3JXUsBhmIvyDaTKJ50CWR89X|i z7gv>;C|Vl50|Q% z>gJG@G80Xd{TKmlPpT%3x(tu0fw0zH1pVHobl3f>G&{zv{1hx{!w82UummVqZhxD zytd*ptz?n~9TF<9rpHU2%G}^aIP)|u_$TEIL&O)8>rCwvKt>PrbxdQd?6(nulB*sa z(li_2Oi%ew#+Iupr(-{4m0y@%Veq#zMHM8riCWSUjo^2Y`gE*_eCa+$T-Yn`u;pe) zO*^pr!t|9w%Kg{YR5hkH=C$S;X@zIECBWK&TxSWIJZL=7w9#+sD*tpVPd2U-81t~# z(H1pas}q+fQV^TKncSpnKqZk9mn#_xEY%8tpVo?bW6-fl9{39H>Cbh_60A(dWgN9) zJbK^EQ+s(5*Gf=PsD4x(L!+X7a&>8Z^}9G>%2(t;l1wU-K$+)`d-cY9V|SAgH$!*> zg{{6yQ(^{`PHc19U}qZ2()8@}Of!%-GQ%Szk0|t z%;$?vFE)Rfp2psY@t%|9q(Ent-O9a2)u$?k=+x_cvM*n0YmZ_(Qy!DPmEKTu5Bvon znsX5KcN0Z62S@+B*^4P0o!c~oUZgtFKbYXfw*1>XSAK-7ew;3#A7?*Z)dY+7{PG^` z-;-xQkKv#z#8rR^&8m~4y=mHp$3C8j!V?6EbQEK5IbmEP{3bwGRpp>)OGU{rt|u;F zN2Nb~afji!K-#zoXhtTHuL~&x#=JX#Nz@FVhl}#>@N%`$OI|cpO;jWZ>Fc+r2-D^` zN@;{m+tJG4FpoeGW303tACP?$XL%4sa-Zy|)FG^zQSG_?>r?>LtK1mTcZ@R^rlcx3U^x8Ta)TmvZZs4Rw&Sn z2f|aG(%gK*ItXsKKWe`!0gIL@csNz0%I}CJH$Pz_?=gH8mRn{aW#UdRwIB6{x)i!e z_qZ*GpbZdnQ#zb_S7KOVX&wRqRAl3JSbx7ztLp>%)liBD$>n**SkK8gt+!)MnTZnl zn65KS-;OcSxoB4ohcB(5cByv9fX3Dk&z$-DyPz zy<~uFvz+}3*#WEZ_peMW>Rayv!Jfif>nB>!oWJF1yEO-K@JR+AmW=! 
z_sf1h*;JQFXM+O0}(??o1@GDWyB9sRtuxk<1NR+s2npV%!zKjzsQBA%*gFB$9 zry)@Pq}J`v`mJw{5z}>Up&Hz1*|k)!1?@{tN%iiY!>l9%#5c0KlP)&s@-e!gbQRa_dL1GOxf1J{D7#-p$ zrRHP2tWv<+r_Zx2BGPDc{&behDk3$kuhf3ov_8Sz zelv?dG&sqd*#1}2z$>0JAwd-pzC7#F z&73b@CnxBuujS4VpnUO9@r29;UzM)C)65shW63F*>CFrI;B@&egN)VoL5J1xk%?&L zC8FAK_mTXe8%Ye}{@m zT~ieNkSunyd|g2G$$A5HFrNPe4$fy9lTG10caX~%Jc93CW^0OpaOqYBTzOER3R3Km zJCD4;TIo|r{MQAR z3d$;8iYr0B*SIJqdK@mVcQ<4YXDa!;vF3^r@L~c<^E=k-I^jvv+IXq?)E2hUvX-db zT?&UPt?H)I`~sK$CC=3>Jpj8VHk};75F+!mdo5yRSFGO@gqMA;wuP?OE_&R^BRtAe z2quYY|4l_Hz8-0bdVAkcbvh6Br9pM(jj*#Xh4tPy>s55XbH-w3pM`gPE?}<4^y-IM ze6ZsL6Pc(qULNTUBYxu|7DTQWug`h$+Rq>Sgu}{B!6b+2qwA+`DwY$5qujaNP>wvK zv=)~ld+pT%h+mXVDxTFU!V5ecEixN{bV)}y*mBOt@?cO@#l$%k$;$uI%5-|M=>9 z=)r$Cd;y=YSYOg)d3NRM?p}!xm3|TZH$6Tqb1WDj@f&cT@p#`S;*T%au64KiaHxky z2)3W%hRVb#4AdRc((XFz#QQ!wCOWRcTE@*D`EDsS#I%CW}4D%B*6 zOq90U5ZwDX;gij1bE>n06m_zZaDu>TF+lB{;bm#ui3>|BM4K{aDRb(S9S}A2Q;knp;`Fpk5*Y4aHCysAL$#jn$ zoQ}##I;{r0G=go62IeJa=8aOj){~}r*Rw-r{e_hc8WE_tW}ll6$9|#{unQ{RJ)#(~ zdn)`O-z0SptpRtNWCsv!5wJE)ldDQSH`dWg;xJ6=q|j>i6Y$IU=GIUUU1$BoQUpxQ z5;jtjB=1{v7(gwXAe$ilXq=wtBJr`70>0)9{sa&e`~2_f`n%7Jrx?j~6O}ay(*cV5+XeZMa}{@wt`SBt?U8nd^9LS?@KH6mY+ z7gRZHBCzv(s5Np_0XCz$oMOoL-B69_ni+Fbu*8{q+654$H9Vf+P}Ws$tgK?pBUFaF1HSrJ&@WdoU)oHBGX~FG&4>_RPc?U&z0s#r+`qI5%kKt{l}E6>qAl_6?)F zvs;WtdGr;19cYh)=Q)9}6~=eJnk!x98fxV4sPr>kk3R{(A$pa51HLi3-*p2P#9eO? z4L7K(hs3^q;iWOuiuf`}!P8LWbf)#oz0fyhC&s`PUF<)0D6(bfob)659RG{bm7m8z zZ8A_l#A=>OD5V0Ya4i{^TUrAqO$jMKkpv%E=!;G*2=|%6!uI69T@_IF3#h6dBZu>} zS)v*pFd$E?=R`&|S$ic+GO{8W-&j?@*NrC4Z+K`3be-S}>Tp*`rp>%ia*Uz*_Sp6t zFPLP$FJp3Rb@b_@2(8-8OpewL@BZX?m-N;+kqga)>}ZdLjU4L32HEzKk`GpKaFW^< z_3~qzg3QHaJzNqQYbB$PsrFFE_FE%qW(#Sa{D|LxFErvh9Q! 
zG9&tqoHT zrJA8o+Oed2w!j!-B(XJUB@IW_Fm67jNSA%by)`-`ulD;ky0& zOr-0zS9Kje(0P_|HN!Ap7(0&_Cy6HEPN$5My>qp(n3`(4Dzz8|lRQ>~s5M8d&TU$o z&2?4{Flr$#PP4pEs1@lPc2er|~TKkZ#>wqXWUM(8KiCG6_3KTdm# z?GYcRVS7M31{{?l5w)%F@2m?DCo`_L4$jWYiygb_MCDh6sJplOU8VVPtqU&s1Mhe%I;jo3LiUq>Y~YkMmw zZ(XaeQgzv4x1v{FEVxG?&eY%4L|;Kut4C_HQ@fM!N~}ess-&hh9-+TPIT};~BdRgGHF@Jg`qAPv<4d`>>7Z?FADXP8dw*8-AM>TaSe$1_o#3^l9e9lS^d>A-#iS)xmvlvh_8@ zcgZyxo9DJp%;i%&H{r|+UDtUoUt7F?Cc1W^d-^yrxBaA(Em*KQ-Vzxep27 zIgxY}P+>UF9V9H-RlB6Q$ihAxMx0wu=+13F!F<{p2QwQWvK=pI=|29rlY2U%(|IPr ze|{+a10pQ>rnr&Yn$Wjrz#!GI6&YJySHYu`CKUMECpDkdio?8|FS0eQ-YbH@G-ylI z`|ERwJ$5r^+m`O5yKeOq*C_7^!-IK5{wugaRvYac=#ryV-|K^YjP*9%JztV`f&#_j zZ-6K)jk&+owzH1VnrSsJS@ngYr$n{#t}+hhUAO>cx50~-hWg)|q!SqzTNW8Vh#7Sy z7>}pXrVEwcWPst3xF{0(x<*>D)u_8j-uPkKsI|r>cXqu=U99u@*Xn7x&Qjp{7?V#L zh54$9<+GsQ02wb6Unh%65jA@`OaH<`lEA*sV7ZNL_%DeCP=ND6#Jw_->de!`p6Xq- z``+E?%pMVjHh^5It@C_qtY+mG$4Su;-_+ChH*FUv1gCQlbDxB2AUfh zs}ns;ZCea_dEje(;7*x-*c!;px5()TP9gVYROFB)!IP)43}KnjycqdNw3y+Vn0@%? zG(M=fqTsZQvqp&>*SK^?BYwTegB_sd1Pva-c)x>(wL zx}~9L0Y2KS7zxDb1ZI4E*SNBYEV-^fErZK z=?uTy_RJ8Vb%{hX1_J>8XD8mnjUV+g$Cfs-uKP!4E#I;`O17 z;bLK;v81p5RXIfCv&@>qP^36gWpu=>s2hG*3?(KQuO|Wt6S2*7&nAov2M_I+{npyU z?&}d7t^>iO#Y9% z*Ni{=M8|BKKakd11A36erMxidypP!6TKM62D+xWxG>>g9eO@djFY`mZKowC|JA>ap zsG$_T4p{QZFq?UHc-={3>lAT*JU;sFsg3cF(aTUivWZoAQR>T(XoqWA!{zi9f_69a zUy?ei=kyo4Sm(XruU|CO)hWW&)0==p3B1@1j@$*lS83MTyRxHrF^uHl;|dFZ=F1qX zD6Rvn$2euHrFOWc+92gL7Oa#u=Gy+GawuOz;Y!`1Xg-PCPDsE6jAEY8sOMbVHXgy( z?_S#TIW@tC=?hWCd-)K)oExSp;QCo$ee-Yt9Z~0-^*xPQv)%md_z?NW2a&GrW1BF2 zvd2@erggZhn2KwiL03juB~D^E#I)8vgE)6ns8ai&+B~HUD89s<2QsE)V_2d@2b1vI zeA)0#ps%k04xGV*nXrF0$(D3I+W~dI9_(V|Wj>M;bS7F*<{YN^p#J6D(_FrypgaP@ z5+AA|+#FYp5iOhDfrkYfmR<|;A-z|48{0I|U^Y4k&pnf@c~?L(GB+#feZi2c`?R|- z+U&(1GI`1UTQLJ2&wJb?w?AvP4P{Q)qTt&=L^f4}YIL0=2x_O!M*D!NN@%rnWo&|K zxLBJvUNA6o0@;NeMcv$`n)F^QdSW0-hR6z|P`8!&>}2fu!=>5^tah3S9;JSxVU8Qj z6Un8{I?pIL58l5!le?Qwb_=Nb9Jo5NJbLYR-#Wl8%|DK(f>M>lbIhut!lcxbd8Mt> zNP;KIehu+WUoNEhC}x 
zzzhZ;hg;~eYkne2*~`lnt}==!&Mj53#SLRx-G!ibe7AXyC=g<%bij!9qvuc&4O7B0S(GTRjI8muGSBl$&L9-%7iU! z%e+?kIh{>=I#3=$^fr$2ukn&3n8fsA6g;T9z_#?~`*y_QHbXuLxM>Mh70$0tn@|(2t>VLt7^llUJyY{n zh4gk6EL$i$_)wW=``+WNpZSki1KyK30(x{>A%B;qUrRiA_M|P-otUEH)9yT;f|$cR z!lJY*+~3$TOew`MV)%R)6LZX zSZ7<-;IP(NY_ijsZiRgj*&>>|#NMxE=`x*Q_iW_5Ek#CsM-MU*fft>IiKRsuerXuG zfAybbQ2ys8xk&$Ey0=?Lw&b+>M3V!v=a&U9sThy>y?t!^XQMrJ7<%P-(I*YN8FN=2 zxmb8)s&=|wTvD`6mj)iZ*He<^&!BrZ z$77YH(G3Z&plU#c3q*SuaU+S66U|eqcZ_;l-d{|<D(tpOX54e_NJRSeZNXqFU#N)XqjG zToes%5ci>(s9!sy{!d{umaV$?XFwy(dpnoAn-#b?^<+=l-A_SD@jv@VM-1C`S_IEc z38)rT!;!=!&B|!ru`~t`PA3>#BI*K#fb$6#{JYp~Xb+!N}SamGt zk!0myJ(WCi@6(uU8bl)s!3$fPzz+q1S!x7@X4q}#mc5a!jIzF*=U+1v6O$?zEaieK znm~dTLt;^kGrKWjX{;tuP?Xhf#fz;r``#5J6KMhO93nR3OHtkuf9^a2XMs{;Kth2y zk3j5JT-|5aix-k)coc*3^{q`IFc|J+jxFZB z$YKT3$GUz?R$mR2tY}P>;*~ejFF-A#$`-lgZtzPZ2Fl=N@kz+nxUu=S;{&@-KdmZR$rr0i!n88}J?cuMIcp`h$!?Kr7IQsIR;JX(0QiOcxe z_(b`xcp!WfpBH$?%_0G=n&ff%)mceOCrZl5duBIZXs;+0D;EN?=2c>8ofm2$p+mmZ zEu~$F+l~>B+!hR__Y`2dDFCbX;b0E9-vLakF%suVh*qTqY8#|9LKz6GSc$e&H%hUO zvF#_tEb~a+yO0JYQkW~VI*-WvUb;VMv7){#9ekp7?~NKaKdcK0VFNiH^Yi?Ioibj+ zW)qve_fwJvj7)fQ__JlF-Y>l^tf4sC4*{Wk11{CH5B?%TKDHzwvs(3*X{uUuX4T#D zJSkL`euR4zQhkxgj&kw<7C41a#HuH&f_np00f2|(2S`y65y)ihma{lj^D*8tA;Fzw z+neKzwb>;5fPJ+g3{y-3xoI*zbI;x)-|X4;rA(lIPA0X9#gXxZ7vXC|gQdwx(O9#R zFRgq;k0Lup-_UswBk3m2EZB)s6}8(wo$&hc46&Yd%AcM6c}>@A-MZ7&+Y?1z=!UbN zWTj0bMT+i%N}jEPS0n6{M7ON#HuQ$_ee;QnQ;^}Y#e=$djJQtK{m$>N>SWa=TVKy` zh?uU~)GS!Bmw$R~w)1n2@Yp`OMx3MQ;-<6R)y@Z#EeWQ6EWLl^7sk3yyA1RM^~DBu zY8N4TA7{7tHx`g(EyErRA{N%l{X+iYE4C?!thtYqk`Y(cp@{yfqo2qt+B?bdCxGGV z7xIPquVqg6(|;w&pa0Q~w0-#>9^n6WFgbZ6c@y(m5^&x8Ak|6-hI6KWjU{|^ADQdF zM-s0ed|LxHrWbgCtzI7!4aDaxS;iP|+m0+sSIQt=raHx4IoQXmZXM9@^W{X@fhCfF za0CVzhyiN-jF__pXFYSm84{3I7++_6&&A#Ey(ZNy+F_<3P`yYcL`bY8*NYKX$ICcI zY)}Ufe+j9<$i<~N6ZqoTnFYR) zT>8``?DZ9@CqNv(E&71j`Wx- zYjS<;8u66ujeQb5F|u>nSSCxcUcZ3UiMCkIjkI_2$OmAa8X!0vz@*Ra@N#qh;eltG z1(w(!3eOx5hHUOa(GC2zSP$0SX6oIt*G>Rd$AfsTzw3Z(YMu!^FZet8UkhD6v+-X5 
zGA6#;T%77Lz8&ROu+48;cMV$L&joVD%K1RV?8WSl49V&g(h*pow)L$+d3IFGTHgr( zIcSi~)#={_xmz)CHs3s|m~3OXj!4$}nS9Q@u-I#DTOMU?>)kSHkoEqh3sg3UX5@}j z#@v2ON}Z<;vXGJ95$< zpnq{E18^k;&G+v@|CPG2mAgT+d;LS}(@MT9o5J#-RvK(e!n5Cigi0&F5`)VJaIrz{OQ zob->@-++Gz+;u}FaU19>sv^wnCMEzw$49u12%ivBAgX zDzj%$!byU>owzJ{qn$4JD*7i%s+Xw^lYe2i(dwt$f=oNAyPgzL?R>xSeQ5Hsq2!RS}9s!GGVOZb+8zh0Ur?O*!-1>gXJ7ynhMH)`gc465`07}BSAs@;LTUX$qk z-e>8#n|1xk_oK&JOwTPKAhq`9qN2j3n~{)Iv{U2bu%#`j1v;_Gqsr7e98rfZ-+`>@ z0=0+_`@Ci6$lN77-|v=05SK}Q9frR}(1?w5qfX*qOT|cgH_yi=RB78$AR&%EL#2ASk zPaUCgR_hGrWaALbJqZk0H(RntrF}N+otL;i4k!&4qn9ke~L_9O||oEW?D;o|WspE2*AK{F>jfc2=@@-jDNDI>`|{nSgm zhsIObpC_68DvFp&&LoKcOdM-}bv|k1(%%2HYK>R4ltjNuqaT|-3wgR0u2P$hOVFv& z*{ibt`tU`allv~Zbl9}=+}D@L;Zes3FO^ zX=r$9-8@x)SlBXjK(=`pppNG2_kg|74Ssc=P6@eN#4x{V+QfL5;4xW^Mp6GBEX*d2 zjKg84-TZ=IELa3G@CNqdza!rV)eIPp)u(4OzjgY?7`YeW5JihiA?N^8P&gcECtChn z8@Fxb?r_Bp*GDO7$+(_qkr>Yrd1^W_^_=E{42@0s2i}bO)SISsWg`tc>%&Qq@%r6E zEUVft`fsakh!ct~Sr@CV#wq?uDg5*|R*l}0>2-m{I#(J^iDA>jJH~6t%0&Z^aT)GhhoM_I6q4vminYd<5vRqb#9ynUbfDN5gzqZ=YTsdT$xSr zy7TL#?M32Vi<#$HNk68(hqyf5dQYe=5(RQSf~VRYTT}3xost9-v>p^E`=}jECW0zC zedH}RMVxZ2`VSu0uud!%jf7J&o}iK}CPTQEclwH}3jMbE&efUJ*0D7Qb-_+nYj$qd z24-@uro2^U5kuQZ`Ce3#Sf?KT(mjX!akxk{e2zCyOh*AaN^{*uvBl*Gnbs^`l4Q!A zHzMG`??0lm?#xKkc1waZLIQb?RihU(BpknqX^YoaZe4GeF_?GxUh?r{{bsnBz^6Cb zb`%MQmNbloTl&hbBEBB6+jr|ka;YBaR@LR^dspSlbW#gZanME=^GF(~)voZBKWfkp zA4^&;sZ6ij6kej0j_71;{^sHr&{#M=`kYHA@#de)q9sEeo((?Y>8K?%4=55>Qu_9* z32jiC#iXN`<={oH)P$jYSc)#z-|jny1TV61==y>!YSJwV-^51cU7F@>jiMzM52 zDc?B!5M+8O;wn?|WJ0it_3Nj#60rp1bK~&H=U(aX2wRhaAjI?f9{KFHfS>1E%;kRH{@Jn6A=G%8GkR@bPMXSq%D~p6O{a>ITiFc4_KMOmQ^w&anfgKtA|-rp3sG0YMW8 zjI(u{OxNr0WpbqTz!F#ES|imS-y)bwpF&f;la*IJE}$^QbTtF4{ZX+;0&wLW-4Al} ze*lLJ0j}xIOUxzcVuf73)WaLluYOM3A2m&ByCuXnr9HCdRw&0!5P3>o4Q%qgf2E#n zwds7Kj!s{QrR(#{*{NXem#NLSlom*JgdhH>7A&HmYiC%SY~G8IQ^mbK4$?qa!eeUw zu+(3RzHh(cv8rsbSAli)P|G!&__)7Gub-YKJTv<-V2U8$TC>aH{Ol^ctS(*ZoXxxn z9ji|>%6y>MTTpuN_|d>Q3yfjx{!jWGtCViLb^bN&neusuaoF 
zhLbwOTY9JV7%aHv$t!XNuUVm-#MF$Mg9{37Ybg2u^HB4kR4aO5UKfg$C#(XA;b~I$jDQYm*?c#3+nbfmh;PQ#;^+8O^7xxQHpD> zS?d-!E7Kz|Ph$^C1&Ghfxh=I&v#tvacM4@yvzy@U|F+|TXxSr_JQ8=y& zLVFi}gSZ0KGaXExu{*D|MZV&hkXow#KEqX=^m>i?sZ%BOW&kR>8c(o6mySs{y0#ZQ zqyH>HtU02Dexol->gpy>0~B#X(@Q>LtW zzN)R7nYFyoJ*gZwjbgiZdA*f)roB`xdZZKs->HFRBI`r~m*HexXK~l#Un^<{F?Gae zooF%*zcrGyWUk#-%>KP|ZjFDnpdtE#tDt!jP228q!{`@7=*McJ1e6IR@M;jq+TU5; zOb>%z4S@ti97mQ?DLdRb)_Xe1=QPXD?f9bZG5xxNi&~dbb2@N=(6}N8PrRWQo^M_h zXuhXK#yfqX&$udBOTBBO>I(}$sSI+3F{Ai%Y$GCh0>`C`(}=jYo`%FL6_TqdW~Ti7 zJih^Sb|%6lRWm<{DR=V>K3HfUd^)fab(-Z-QJ<}U62ACp5ParE9T>U`rB-T$o<7St zR|AI4|FGzCTW6qv29aFU8s6JI8-SKGH)384501F!2;}w?Eb-Hs^J$LSwqv^PC(c!x zvH^Is;Mn+*^MXc-^psF?=p5_FhL%=Vi|((_em<;_{PaMC_ULEvAVtfPNX$o>}D>} ztjf`(4P#ifaQsW_^k`XRiB_Pni7y&KPdL!RyXwt#hygGY(ma`gR$6W}lE za*nURJiLCD^93zk21Gx0MQRw*I&qzcB!G3gm$vt+|WA!!9Z(YwAmZ$Capg ze6o|ZEe3W{fQ1D0L>VA_Fg34yUVU)QmMo5(eyL`!S32A}pT%sC_k;R^>@XB)XI0n& z?sE5eBlomHa$6@On0RZ#pbP`UUXH$=BjuYrrzDwp!uBoqc~^Jfe8WJ$++0(w zL8XkP9+qtgZnGDjFMu#K+N3sbwjcg2L>$AQ9l~spA`z7+9V1O6Ifo?rHngx>J|lZ||D2_*U-Vw@%(Sb0qh{w^_=@ zN4+g-^}@j_W5V<4*j?HVn!>!HOI68W0f#rka=n5N#v4AlnOhN>TqEiCrTVMr_Y%HQ zm(!A@OvMTk!_LU@9&U4N5r{d62VLK=xkSamt5?w5yjo9@ot{;xg=XpJi1i^)80Yu& z&I7XB{U`l4eA5sY7RQYE;S_Twy*xb#$z%{y7bU6v0MOj~kG|+i?A{Tm?sF$^&uP@Q zaRC9Df#de<$xNQB$d&4SYB<}W(cTfSR%6k0KZ?gWL1}Vx2J0~?Tu|AA6zH=bl+%9h z!V>Qp7N2jPY|*k7|M1c{|2LpV^E|h~pb6NPi9S;a8Nt_i5M@KVt~cx0xt$tESsaVi z2op+Wcq$Xc_(LH1WmKn1&)u;ElgSN^bhf4i6B|lPOK+orvIPCsH%$}g2@Fj?1$kX| zwSx2x9goXtUIVE9wEpvdp9bZZ|9d`Eev=>WO8$`ezkA3j*c^l*_UUf^%We`=O0dUF zu8$@5Lp1d*)3zslN0pM&ulVP~;!>ZJnGkTF#<5!dW+IkxIbCb&fD%Snk^+wZiu96X zFe)+K>U!GivUIetGPJ(R3ki0F*OI5$gEw=FFDN}mOX}Cr(sF$3UXimk&8mMciKO6rcVf8=cuK+Lp&!PWHNCo;_l4pr_3S6}n zF|#A;5w80NSl3r`4Gr&7rZ@}Nu;3xX*%yUG-CpaW%C|GYxrPv$I-`17mBR|D0KLx0 znu^0z<-oA#Os4X!o%NJ%z4XN5gD+V9H=icGVihfJ|I4`rc#@Py{ao+7?UUTW-sS9%D+08u2jYG1IevGROsjJp&_~%lDnV|U z@H@}ifypdFgm!C?J6hwZDQBRZ4hn}Ao@v+t6JlGwnH$;Q_8p6fJq^Dg3;~aig`KnP 
zmwtFSgd6w3Oo}Kho;$#cqHbq5v}w?tbkIQhXTYSwi++u?d{X|1-q*&ad3p15_;A>=1dwe-X}CIYVS{{ zn|D>*6|Fn5CSc=$W$ukK^IkQgGTmD?`O;MZr=|hlK*7SYwO?M$YBMF>{Ij1;7$GAl!^r>Yg@x4JV{&n2Ld=<_yrRz zG%;jKJ9-?0=JtdJVoVi@6#S!_XSO})iGiOkt99{3{k>?ll?HahJ_GK|?PN=3mZqF= z(b|vczI@pQR@;8~qm0;HeA0)X0w(ke5jN&W+TvD4J*`xoiq$#2ECf?!n7(qL+LPig z<6`)gc7vCVGEFwv!XB}s*Bm=(BfMl#WX(3mz+ zcB&{$Qy%M-0?*`E7Tv;6fv<{Il(*e{yZgt>nN3D$$<)h{d;WGrYksNHv!1PON=t+g zLKKGCysIgE4#^tSjx5F{0GY}7M;+}3GCR?u8ar)7tyfqS^CE`fepzP!KGIlVH$<89_lVrK9gYr5H3R_;!zuaEt5)ElBT% z;VyqV7$2{P6?q8@d{AvOzFb%Z{89h;uYc9of31SXM3{}6RBgQ*2id5oW{<8K9X)H! zcP;TTfYhN!4D52%mww&gSCqb2B)Ac)*alp{gg*z$T>h~zDBtD(iqy8Ip!k%aFo_~| z{cMDPnf)laHSX5u(k^+pxj<_mBR5=BxGfB)&@pS|OX>CGZTb>9>Udi>2f^~?=5p-T zfRW+PPORofz?H>_|0>Y``}2=Xbifw?^F&&m$q=V-C3UJWY7gJ#lHamXo7>~xMzfX_ zX(}VtYCC;&>-ar-VahC+Ga@VEmHYGlFCfkdXqLDUdhL9zfE=##3mC5Ye*0b>3AnMa zrg9{&XxH7EpzD$EGNi!0mzEGRfydMMnj;UVuC2yDvi)9{U|*wf_O>utiRA=j6X%B~ zwb%XirkRmJxwt*ISfw&i3pL#rIejYboAmy0h=I^T(He{OZc*nM4Xm z*LW@odXO$(&YVG+@1{EXGe6!{o#O!aq_I6f1E|YJ8+-U>#BZWzyBt}1Jhrfv#zxQ} z=li{FIN+Q{dTPd6qpm@d9dPxo>@{-kbmbJrpClG^W`=Gm^_=p2YoDN+MTpV1J&|&u zq2uCWBTIy+^!>yd93%8P324U@4}=~b2!b+V7@?r=o?8vVx^YrDLMg_-g3@Ef1w5y` zZ3=fx-1uw?UQ{2Y;`b7X5Yvssw5AG(3RX;LRJ2i++Gsqk5D^A9Xc;YIL^z%9@>zCI zpWZ~8P$iJ&^qUOsaQ(pV9Ik*bcx`N=c^5!O23ajqN7vg+Z9}A`HK*^sCQL|rj|Q`7 zZ#f58`-dhl?cI&8PELmT3rzePol(fbu^0Gc$zvE?a`CbA`bwWF?Zm>5V(UDbvxi4l zmxi(h%u=-B)B-1L-J`z&Z7M?;M;-Z@p*geB_`6>l3oC+W7>Tr!Ns_U9=A6Uw=s356 zHhL6CpB9*9_BX(_m6LhCUo_&gEo)fwhYEag+&r70Q6{}4jTbu~S((0Zov{~KM;31E zBv^J5=b3nGTe8Cl>n!&g$5=CwETnM-e+NfQub8C7=?Zf9XrUj&^k>Q_lF?mql+gJ#HRM<8n zcI(n+tHN-aO)xIgVcn}y$8+7c)UefHs8UMm7E5-fvzg8y_i^YTtJcXnPSF-#{#ppuZ+M z?D%=~;`~uEj0MWX>6!Z)yX@JM6&3hMtjNxkM?06T0=zL!iK?S59f82d6`iFBZcGm8 zx{G%EnoDMoajQoMaCp>?F2-8PdBL%yvg?UO}<1N)B zZMX}a* z$%`c?)d;OH*e<%nT&p&W_Ok^+`7WjIndRWHOTZ8h+A$G7%q}CYUXW&_efYdkYo@RB zBunr1v!*gz%4LFsoO3^M#`8XA+Km^;GtdYdsB5mFVI(Sdny%cVO^QqG=&U{%di{sf zgZ;-k1qZvpq}wjblxpk3_yQ)Fs~9#*0r zb_D?9<>$vW}H^&R@dFxcurT!&(oMwsRulwX6+Us>twa#h4jMB$5oP4)g_%* 
zV|Nzp<2#{i(m!Bq$qOd>W8AERXAO!z9b@@9>3=$cM#BWmV%3smGydJ&wWWR^a~uhK z7h&d6d~rLWD719EOhDT0vU;|Y;Xx>ss@%fc!%W1pYn~H0)DP|C{@;L<`jU*+Bjr00 zis89Z)uFzQgQ?rU9C(X=g#Q3Aa6b5Vwtt|jeQ%FkDU9WGbTFVHoY^_+EDCzLTHoVj zn)geljx_@b;qWYZP+)z>qZCmP)VCI)sTkdYYxO2WR^EEE7hkTw0hG`FW6ue019zj^&8L+cyXWN@}Ilz{p0ZKZz$B)giO`Z?L#U;&U<_W6pyppgsPTL4b`vPS4U4& zYRfWeNas1T=Q-iO0m~HK;|lVF-+u!v8r7s+#*DlU*=;UT(=CIB9d2r%k;a2!S!2u6 za;cUrBKLT4^G}9CXtECIMt_aA+|YfSmN9X!!}lYABcrP9Prwz{SU6%F6HG_3BW>s0 zb#f!TZ07JDh(0q@lGSp+gHWri(Zlj|aEGxVmG&1tmnVhNaDbP62C3UY~=cRK6YXt}z_eSvTt@ zv6fgQm#M0%{xq~)#n`dgLOQ2x;Mn>h!1elc0N_d~6=|+LOs(c;{vXHA`o~V=qlXAL z_Nn$I5QNW$7fhrPiRe5AS-);CBTlNO1!yQnRJip$xI{T9|1ZwoI;hQUZ~tcR?iy0V zt+aT8yA|4E#R7yNC0L6SAQT8L-4&pCaS2ia2^uI434s<5?i5KV?ocRFv~SLN=9xMB zyz`rPe(yhlMZ&!(XW+T`53`qpSVvYmMD;)=QfTAK4f>WaN{_WGFW6 zm!dG#Qj;!DG??l-^{Kq?-;^T?Y06>z^h9#u{9SdE**BoGpfSo1C|fsnPgk8WPcxAY z*tcySo4MoI-1zk7%RFeHHk0z}vp2@F%QU-!M%!XFws9|rtg?GMo5m*g>nh#?f`TSU zg27Qcvw@eZz+sdfgX1wL(w0rh%`X$jnqQEV&yydhx!bx7652582SvJ98{^_wGuM$F zSZ{CrgN5KPR40!xZ-lA6-bfLw7HyLyV!{LJoxyofu0L>(}eC*nFmMUw7 z(AKn#gzS0AG4$o1^&(fZ%wqk`Cfh!gg?{j6!tun(C{YKaQ}qq1+`?Y5<*qP!bk10^E?T|Rdk;hQEADS7`R#q8wIMRAZ5tb3`;=bna*#Sre{CT;=2`FU+dF5rg9Sy=YdaG z@@*q4tJU<`GR=Ag1b@gBVsa`!-Hb>!dS7k*1TQ@}s^+u7wlL~jE{Wkz8GUb+GwsT` zsNkCMXV1vutF>;yd|qRqBTyX>1EFLNM4ol9Bwn^bOU5g-mwR&$<^y5T>f(twsN(?l zR(jXXdQd>(AVK2c(r~JC9@r3{@G2j1q_CWf*-362F}O9rqtBV8L9^x)V4c_~RJCtF zuRR}p5{JF@;jEeF#=(adO}P%FlQ+Tr-qYzs(wt`{j_X;yh6_bq@YS*0T)TuqlLTr$ zOGIT=vudsw);&$W^_%L^I(Iw2NoR;CTS{WBWUs=QnBDW6-x}GuL`@xm_df4wSHaC? 
z*M2hSb%3sdPQTAY!OeY7HAyzl*untNQ$V|}J6>cu9^Sb670q{m(1q=+#!s?8A~I+1 z#EA67sj*s_YkqZw*)S`_vQ^sfa1(KYMf1}7rjZ5n_BT+-80#YvdLWyI<@CHGn|b1juCs`!Soh7lI2ek(9;2|85i3= z8@K7^K^qh8i^kj0p=lf@Mdv9gffDvLgC0d2hky5lCtLe0SDnJyCUl8#dLg3ybhZ>f zMS^D4X(G8Jqtwa)!=rW~;;D2eB^d$fv5p&Xn@4srZLiHI`tH8S7Yz!W((sLJ)u^)a zX)M|2$WUW-&~u2%(5>kN1mt81P3gR`yMOaCBv} zZwFL;?DFns34P8Uw^cqb&di#>YL_1>Q04QBdv4us>>YsOXW;<7)QvF;hBmtI>$ia` zim)Ud&bv@P3cjdh%RIr;YR=B8zT+_To0v#?sYDsKk5u`m?RAJ!z0U3uCX=sxhkwB~ zIdBB~nC@E68$S0FJS6+{;vr*2FOstLZOBu-u1jP&8tnBy{ngRNGbfJ5_GG9=?;@)J4tEd{=nWU=ICw?e6u5wZFJ0J0Iabidaksgpc^ZbY2zu z4Lnz|`}K3ErB3AjihqP$w41lrag@<^I^*`dzRW@3Go=M3kEjWp^jdbs-@Cb57Cae& zR?L2RO;lI^?xW0Q;+HuyY#s#zKLVj1=<2sq1%*c?y_PiYmvYF?-&9DZpKZg|_w$TT zX>EhzjR_A~rEAIW6lkPac?-FilTdV$wxe5_KjfC-r>&EZWX`WB)yo($WerASmPMtM zMt{gbs**pAQJJRy`uDU+x$=LSu>apa?f;oxuZ{Zq`$ieA%iHFQn52~E?Z=qjTdOvN)(=&0K!$53LcZu=1C6ywd^ zKIa_97UXf1g8eF|gIM_7T}+NOQ9mqUJ{J-b@P6}~DnNzTmuj)A=KAV&p3l!JVwHgG zo6w1tCuY?@;-~I^?Q4p0lj1fWv~J{4*S)AzV?a|}cqZUcl>iGGdKoX2K-o`etUc`5 z=b_bU&A6;nwPcxNCgST0A;fan0vi$k(S@a;Jwb1|1qu{qW5DR)Ed(QD@{+)mGdl|U zGVYB%!e~PYtO)PN<@8O;db})>HSuq9FMZXjWU3Pw`w;{`G?RM{*6?HHjWR+u8Myk6 zd>8hv==A3)kP?QKt8|JQ)swVzJ3G6WbcdU49cHr7H>9R z=J1H$*9*3c5d!=&g*Z$5TSeFMyr-^&80Ykap^zX0ObS=EF4G;W4=*j03u!G-nBb4C z>m_%w=KL8&(cDUpCizVC3l?r1nA z1?mW?29d0~j50e3TjqTB!7H^kg*p{7mXlK1sc`lA z4%&Aw%#dWNX^!VG6iS=oGWO$-_jN8;A1h5gPhs|>TOz_vlG;lg2Qw84@3-;i={*KJ zdw}FGe^u%t?<~5u1tys$E5?`3`$s*BP(KqAsHIq+S=7!m!ge@bC1rH`8Ho1sDfcKy zhF$ovP_TdK6Z_t67QCK~GiH}1)WtXLdv0hY1Xq_q1k?DHClkA~aeg~HCMvZSdz{Lu z`!4-nCv#Ga6SYO_jlj!@4*5ymW$jLC-S(M}tBFe7BE-$8ar>E1k;z9o4zqc9k01)B zbmzzRI#5F^lgJWvhqRDhyZ3>9#34`sw(gVtIdNQ5bqfb}HrU3ppd7N8FMVcRS{|7G zqH(k32&#=1WiE@iyw?|(K+xrIt_T#%1DE7fTl{)z7b!k*yP`>bTT)WS6u&FFayAxY zdgdN<5N73nG+{l}^ZYjzs9t)6cFf0pdN&d-;X0w<^a_XGX8(@-%R^47<@SN^h z>m6hI=&j`Vi=SC;xmTeH;`)X#N(XPE+@<4r$$(LlUfT>w!ED17Pf8L?6#q*fD{9=P zQ)bx*D*-d69*aa-WsJ#au5bj@T~Jj4sEK;NEMUwq`#lBVU)+F(5I}^@ABOME`REP9 zwhO6`Jik9SV~+i7_MN8f^K$vjRS`Nxg-HEQ36`UhpVrGTL`tL2F~+I)k#gl`s906q 
z9@t9w&}jX&aLJOlWfpYymz!eiJo=uW! zbeZ*LRR)9~;vMB6i^S#mMnhe^EC%)Tu?lyVRtnknXBlQ6X9-4FoGRt4Esouniiqr$ zV7;epY}mMH&t56TOlJ%l*VXq9rR2L3MAsX`Ni}57{3%}VOBhqY^F(L)8#5UoO z2{!_wOZ@~>?9VlhjRj9A90G>Ne+u(_0J{6n{Uwu}Fd?%pAi$JLSSu=8hPxB}%3V`k zClCg2u8n)dxVO#&W<5H5s+2CsiFN&HJO;^>Tp*gSrR4)flKJ+ z4BOrv-f^_gpJ%&=@!un|Jv7*kVnK_Ys>bY*o6}!dCveo3q?&3-7F?;{AKpvoNZc?H z4u~hZLq*h0vA&-YLr8?(5|!-NUy)x&=Au0r7w?ipwkq7~{(6o|xchvaTvBE-=(}XQ zx81k!`W79g-?82hy1qfuT{J?eXdYy?>-(B}^n0uK%R6e(2%wU{fzhL(ZsXTna8d~P zrzzH%51U5wZ+**>5834?Q@bTwIRn_5AH!8AOGd zt$?{Xhjv$(YiG(~YzQOvruA0=kA7+HDL(r>B$|hVxyd;KmZ8~uVLT?g$nCKxu(d7r zL8B(cs08P@UHakVS`VeFSWZ)`xdEAyc-Di(Ya6aVdIO2-~rP=CrHJ=b%r~-e<;RR9BZ0|7V@{kJtapI>4WVWb6}m zA#2`q?y^%YyJ9yIj%!dGV_W|>@t}sEsnAJO^!tki8Ofc~5lnqcbX!La^cmjZEBIdR zWLRPC^#b9OD7TFS;qcQr^FvH=Z#%YJ)=}siI(9~X!y~gA(qVA>LvsSq7iL?7tuyhG zp+)u^i+4~%N3!4fy>vM+v^^I{o|g)L&~Xwq`|#si+zX!7X%yPOXtvxU1+J5`5xFyM zi)^CQT*QehKgL|g_0o7sP(jx1-W0Ij1ND<5)7pyre*&GSZY%9KH%NKa6qgEo9IoPy zhaTONyrztY7M@7D4zgcDwr`q2M+v{w?bfCM*=vieRwa{B$HuW;K}ChznDm^FNfIqb z!=HTv3`#0T0&l4c8&JOuifrNRMjk=U&)89wDaqa#9=C{BFABtET;?aXAXSQS8q_N z{ZA*w`L@dSzxtc;${Ur+H;BjwR3ph!sV-PNB|J}tX?Pvc&k>>JCR=YQ1vzPy@BL0C zlh;fK_)@c7G-b_D=)_}5Wm)m)7 z!6Hj>4PlW{hK#AKyV#bYYe0)GoJD5l79?00enTPR_O%s;E!fR64Rs|c>4kjkv>{%e$>uR69{PGN_QN` zxetr|xXV3ce~V#JC-xn?biH5?#>=jSg;u0g^_>38S35@28OpV!+ zxEEY<$cOZ&on=m99<_Ln8ev0Ht%$R{dBbyj_Qt!jGZj zOhf$Qpn`e?CF=rCCKN_ShKoAs67Ui=?HK7qaNsKw;)1mx@j8Fu(NVWF0urQQiMeN1 z?*mxp$|!S+K-Z8;+>;P_LFp}3B@r&rjYL$A0&-bS%E4u3X8qFzU6xXef2Y2}*Gz3$ z3udfYwTmtwoA_6htK7@71BED|Q8l9MK+Stk zs_(jvb!ck05DR!a`9djG;q9=j9dL6KOEYonzhtx>xmsb2VijOA4C# zM(+9BI|$1*q3LTfWvCiwmBL6Hb@fDp)|OIfNvp@>Jzqm|u8^5og4kXP3i;-7RGWS2 zz_?C-SRq6XCqSEc_4danic9e;np5Xo63VDX^EdWUKy$8MArs5EXHLirq<`gaVS~f-oh+eHkvIK1C@W+|M44zOot0;xw z&57GfLPnjqy=&S-$mv(7e21pq%vmVPf@vPbwikfy{Pifj`eO(n7R=hqxEvQ1Ba-X$ zX3uC8IN1?R5AQZNL!~8Wk($Vwsv$9UnBA8X8}KOH4JB}oeouv1D|{nt+WNc;^B5xD zFB8Vm^K(eH;^XI6kH}^*&ElH@ChlthUl^k_w*Yni>Q#v>h807!Mi0zgTYrbV=chj{ 
zyR8NZM}2L0cz^m@Nm*PdbgtOPzA-?sG`G;p$D{DuW!q-)9&LIxi;#^)t8Be8tzK2X z)P9;+A0-4v1`Bv%jFt@2gBs_=KgrM1t)TONo?)gVziBE@k(aP3DVbMQ`;*PsO*L8i zR$1Gb%!v28yBiP`u^nSEJt7mYlSkKp3~J+cX12)03}j{vWV((C-^$pL^9{jrN=Yb0 z&T7fOv^OZ(_T8NJ>cxT8Cz7udSw!3Qoh^KN;d{q5dE9Xo~&F&t*jUSsw*)f+5Gd+~Zh&l+Ny&79Hr zW1S=8fhv&&4QR~$Rs9x|=^E5jGbXxYJ!)=Uw4k%l9+Rce2-DQ9HcmO!DaK)Se^b?r z{zBiG*WdFqrT7GO_PBtalyiBvLYL1AdH?|)Y1mYLZ}|CezwxGPJ*v=l$tf*MMk8`d zaz->}nI{3qncGE>6AdZw?^5Pxd94{JlQ=q~Ya7Jj%0ClkeYeezY46sXWe8XmGp`YC zRg(0kF-)SKd7HUo!1-sPeoxfbYHe5UmByWz_G!>m2H-9nqoyss%x@o%m1E(B?UN)P z8HP<8Z+JMj8np~DrPnGuEOJC=xSbUDejAEp)Yg=Yk}+TOQ33N#&NP&GY2_K|k-0HO zg?JYTz$IKtOI+bM74Igu(6db4&!Uzg5u%f_R}BIkSefq0ky?jQx2&Zcjm-?g3p8>S{2P%af<@j#`5g_1C6X-EN+>R?XkkKrn{>@LdJ8nrX zsrbw`$IPqfFis9TSaf(S4dWuVxp9|$18nN<2daU>0!NQBE>+7KI7>Dkw!r2 z(l3*J?9i6xz6hta(=TlwWyhL8@yTPUe%|CNNz=suv)DVK10d-+1)&t$nXG6#e`GRx z+9>jkhwEZN8#rTUsO=Qi#irMu80E4u+E;aS3Hg9DKQ;1l70s+P_U&Kbds3WnFBt=_ zq#VO8obip4>DisJU2?Lxzn0b>=%HYT0$D8qq>xB_i;ut3R%qI_8B3y>m3?oC*-?dE z=w=|^Kj+PD=unMV!ArOK*yz{O1Y4QLN4wF$IF!kcU5H1(mETmZhJ?A=8ge(9BWwg; zs#N;aRHgZIsOYFHPiMkW@Ol4*Wwz-0aMN<^MQMMK74rj{Hn=D4yzhEZzDyAML|xC0AHE*c!QmJ43c;-8 zmMpV1&hcTTBrw#aLrFdx;DPh@c{TT~!29JvyKf^UBw(|yJ0z7lJ+o0LBeV8CnaJ8fz)v@6ZI{|h zf5W|1c%X?k<}#R@$%-RxK84s#Q(IguUsl_n#p7m@Pnkdg7i z7fWmVGYaLB+h9Ckw4;xVg8zuM62U`Ur>>;T91m7Z zEcipwm|L8;WkxpvPR5@Sp7p!twAUdKai~e(Ofjb7Ad{Y`Wm6f(#sZfq{2MoAHnuyu z&%P1H+X7|ab^eVrh08r7VG=J{cYyEXw8+B^$t6IKo5=tj>kS8qvhL0qwc@F|EWp!c zdOpPbMtOUqPxz(i<=T9jU#z$C*EsAJaK5HTf$cor0ciR`u;)W}8ARS(MyRI4(f;H^ zF$oZIk9{XTF;yoKP_D0pDM^GEJr|H}XfZ^2q$QCDNd-Dw7Te)H#OKWE*?o7XPjZc)n%__8 z+eSDwN009{i+5Bis*TZ0&yGMH77NV)W2Pi)$H-44dVW#{cHI zjn0a7P-YgoaiJ*MF+we8nR9Z>3M00sr?3gbKYn{?pGkU7Mp|MI6y(JM0aay_CQ-gU z1Lb_si5jdk%y&qBuYxU3^jh4bL?Uy_>>f9mG4|p!Vf3|peiGLW829r61bhJ*JIo`E z*KQ)`FdTm9DE$y2SI+7a&H^lvRABSH&6=<3UkB2vkF)H!TzOYd%|a$ZBnS!ozkXnW zip_#`gK3ISr7P|3N-F$a`E6i105#Sk(63#e$dd@!= zq#Bv&_2F>GNjZd=5WPqfA2s8UwzIrF8+c!X0a>7aPcM>Jbz6km`YIsw&8cd3GMXwh 
z20Erry!DidO8I{`EC2cR#+!;hgCM6^YFEg$#pg?WRU)gAqj84*cS?%IGD^ZjosDhe-W2_|$b;6vFPNh}PGKKN(z z;p6|m|B36=Y0~s1`tezQM|LjdEc50i)ooFKC*_qs=H%Mz7>CnGwf<3_$2?7MV5*6( zTq%9-SjF`=6o1`ja#4CQQ9yA@vfkVlL&958pn*%NI>9ihY(Uqc>|vqJ*X3ECQzPC% zt;0%78?P5wuV;7GK37?ZINM9{yVn@?rJ3Vkc(aJ8gx#T-Ksy%9a-DEEsj2POaVN!B zMXR~W_DtYefkzpgCdj*6$Eey{0D3SIb`5XiwhFmvvP17c#cP^Ceu^#4z za&~L?X7{?E$u(|wA6Ro%a_Bb3n`=2;6`=>{fFByEG+lDag8?gzpL3VvK$= z%GKo6vyg{lB<3{Vj)}gl!O}p_3pi0DlZMR;3FFcMy(j$6wz25H1!8|{qj3R6-kp=~ zhj$=f5?jm^UUPd+j)Fg#jT*8)8h$`gXyC(Ca=ys8cGZu_uLGD8=mKY|xhO*8Oqb*y z!Fe@*h!%R6rSn)r#sOixS2n7oYtdnhG|)fcBx;iM6{tNA4PV8>wPZzcZ%25clvUvs zct)1|Z>n@X4CEv~JF?OVu9~nsz1~5%*`3-HLs0)Klq36?pO>77-Y%ClDCs!bwDh$MW6aA3x<(UCvO_Lb z=&gC3nA@k{nqlKT_1`jrFIF@zZr1-8X&zCm$2~|!Xx`$$GV=!dLA&L^nO$eR&(RVs zyuO@GL9X8)`AsGNb}dDH@F?nKRKZ@`H`~L~yDMY56xVKqb%!}X z{3|X_bKM*CiAqSDG68WVoEh6x`F3<2N)lNaRfy3wtxcy2oY;rM+j-38bZQDVv{O38 zZDeU4DHf1kix1k?)-5ln(eOikc)dM3S%UEP)3^q$$I6E`)QpR()HA4IHAz6-EV8q% z9Ts-1mu!JJ`b{MbyFiP{ikUYbF%6a%*{EqtQbaWVkPl<4nz@R5g|{9B{T1})4Mb^k z@upqXy|^+r-CND3jX{3Qz412|fLBXu4v*ULVpOCXGr@vY%zO zypSoka(hA2a(^uo$VjgTSQUP(lc8r9k}wr0BawX0&(d&s_2UB9SBIDmX}5QR9i+#W zs+xjt-RDsQHiV2CS6>aVR?d&iYZ z*J9|QDP!dYKd$A(DowcD)Lq$+d$Zqfxcu`JeP1|NDQ&`Vu9sAwm9u+S^V)@}XtWqf zc}>Y1OtfZIVoNu`MSolrHeTt;Pxc~k@SEhPYr#oU3(9l&sa4lEU~k$any zYX@F#9UB@Kd!r_;FYsb0tg^T-d0r`M*X>x$!ajhyAIK`dmI9bmYD$hnt?13tW)7z5 z&B=n-?EcvqRJq?DT>s~_{Ifs*zIpj4RcI-*M%qrQ^Of@|i`>!w>QAcN`POXIe|_ef zdF$!SaB9I$BRjMHbi=4@FacU=WHPIf@>;#W*{wr{UT??Pt-m#j@m>+`=D1FpJ8hK% zHmHNjm|VEEF@NhL67M2N9_%}6l?!6q$%CMKZ>jSXjG~pVO2!!1kBL0eTvCt=a*45- zvX-I9ohR4nbz=9dZ+brvcxe#l7??ag{AAjO;loB>CACdk+$5nOTQ~vZkt5 z@SX+=@u(IfHe_qYF&iwX0QR$|o^g>C-r89-(xJ&cU2pMBzdB8k>>(FaJ9>Q0fhd(P z3ZVoVCF6B_Ie$~BAWSsvXC|LRp2v$n`|ipvzo1b`_~}|(6Lpxmzn-A}d{)pMF_$`z zqhW0vo~gC}<(DKQN8>?Zm3JN(!{CoVZSh5ZftD-qBmDg7hPVtW(kw8vMF-N%w^rY? 
z?a^fLu~rRR8lj+Xr^{{B*P#tPQnS#YVHwQty}x1{wrRlN%`+ZlJPKCZAKhh7YiMg) z`Kw(CJe4GYX0C62SIj z(TI6o$Sh|3sK|%{Oal1!h{)O8_&V5QH^tYDH0RU1e9*%_vgpzK$%ZH<-m>2cOHBck zyatCnejg(9jbABhFWjZ1m^9#eH(tL>h5{4WCFg{xSuO}cyNs6t;R`t}6B^2vgj?UL z&LKA4_Q7c~AE7HVp@ZS^A5-Ht>FXdxEb5C$odB>#O#Ujy3h&ejfjt(?8TFQfb9%kb zH-Iws@8P1mLU;`+s}phMc0>Q1-7FP0P3`t$u$50Na4uV@*Lm4ao>bFLdFboG_dqrE z7M)r0ds}RKhp{~~WkkGW+rr%d^mQ^P4RZ{TO`s03SlXTC86*B4Qu_egyUKoV-$Td; zhp3K_Q>r@A$b@O74;V`ZB}rIh7D`&^75eVEeb_7q_SK*&O0#1(gJhy%WJiY@kEG$$ zV09f668~s=p&2lWp@;FlZ5Cn1j~zZg?F?lh$MLW%_l3`JOpEd98mDHM9hOpJZ2LO?cq`k5#_qr79_Jy+esr6ffu#6G z=#>=Yxh?>r8!!wr?g-PBGTYkTs2?_3rk1TxvE-&%YoOhll0%92t4^=u3S4YVV^5`} zv~AUj;Y`3a!{Va6fb2zp>AypV15iByQ~C|NM^ zJRl-3+WN1;f{@Os0okZM$M9v%ZM`69>6#zZ5494WhMP$RkQj0dHpxay$7;2WB2hB* z?j4TFB3Y~6FlqzC36*Cq&V}Z*o|5>3*-x{@8565Sj8S>yv0$Vr_eOB0T?HjAjZ@1y zKnY6kTcO1!pUdD>e|N*4<%Zt#NHhaHSrr@yj!X08E;L=`&CK2@&r4eM)3amttYD9K zSGI$1_<9ynI9NkcbylkeLX`PNr2JECaAJ0KmnPf`zke-mxn{Zakw@XI*_-M5XB^Wb z&$EuHV+OR|b+BWV_wm-Bb4?%nsF+jOc)p7?K@=@PpP3)`yd-ZFKiCImg~ke6^UIBM z)wjO>!lQ|ojU3l7yT$~W zo}4tXzjjSXo1(s@9zs$tosw0ZWs*Jb3{S6*GtPRxvONno{MVR}_iA!6`f>h9bA+LQ z?}qgwK^d-V8-?Z$i0BTp$1b>~BuKR(><1sn;92=Q#REh09G^*}(}vK5xUfxcqrqB} z;)bCkQnSxAqpIftql-*oDxmlq>KPoq=R5Y)UDd!ETjp!Z#Bah|?XU}+=&IWAVp&!- z-;2rSOh=>TxQDqLf-77?wNd#)Wcv2H&@1ifT{jkwob2qOhI;O6HDl$=z;_6p1pVR* zG^pA$c3mRI&TK>l`J1W@?+8a6%9rFdO ziyD9b=g$5A{5|#2NDUR04}0f8UDefm#5cbJDVn$eWUS9^5so!{LMRc@msnI}V}2`R z0D4Is1szpG#0Vf|BnO}=ta$>q+?>l<+txm1>_nL|4W>k z@fr0z7~TL8uDn@ z8x`GmuUAxTBU!FGRF8#Lw^38_3BmPznScxpS|z?PR|Sd0Cb(m#=6A4^k7mSf03UtG z?pbB@&+zlaV_ovbUP36s8f3>w4=J`}Q_#(l+Og!RimWzegwsJ=gNV8EX_KqmYi- zsE#p(t!`G8QZ)9LAP>lalHsa$*z{QVMk%uOdoz!m4OwfhU(Ub9f=jI@U$jFm z`Ke=^pdB~fN5!+GV<($3XQexB*Fya8Ps7AmMrJ1MZgws&cj>VgKLWUtgjx$586N{A zIi*69JNu^M#1l!D(=MEclI6O`CVt$Py4xG#vsQNQAf*DQX}S4t&R3wPt{ZH(@X~Ql zC~4A31%coNHr}zT6-<>0$R^u_&b@JiRqCUV5%CXRmvtwS=`HqJrou=~N&BzSUNSG< zI%qKT&TC7ix#-vs@0yv!MSnbG&9|hQ4!1&5BvO^0A8Y4z#KYmHTS;b9fLMSCOI7Hx 
zk%hSsF2ew)z8&i6q#U0f2+NzYm%XAgOSP8E$M8rUbiR;kVZDd*QI%e|G@!m#HJw>b?Rn@mgfAJHIUp8JFEBu&oOSm0r0(#*EhOKuxy)Pc*F zF*NKUQ$)?G*Nb2qk;f?}u_d|J}V&$nmMid3=Q1P-D2B`AW^J`Lx$+ySNh^iZvCcDyO*f>1Fzv!-MHVkAV*>c2gCh zvR;E}{5u%xR~FjM?6A7&QMI$c-A6)Y!|hV4EV6( zqsq#6%f=@UL>AqhiLLjjvbh<}%5(dsmuw84m7u?>(Plp@m1siD@lH_iV^p7zproCM zj=}->wrn&_T{c;oO$v6O`8cTH_NThGg?_66-+-1zd7hBXb+L&pDT90aQ9EGr;?G(b zDe;DkTf&{ElI{Q8IPfjVoh~(8$QargbUkW>b6)H=$_msy1Lq(v@n&ZSaj8P z1?fk&(I7n3%}+ae*pCk;cE$v^JbY%?HJtCSN8`COFt^zQ@1r;4q`WtfPJcAN7Fiv1B@Zd3QERsJNF+6^z2Qa zPj9~XfhwzLOOcPP>+&)#=_G*HQ$6Neg!e(y_3*K5%vG>GCazpj0~wsL+`o00gZ5}z zw0n+$DpsjyH@c97Ev_&+vA5I(38rlV>i6HXHkKo!9~g|R;l|a=yCM7?iHs7+PL#)8 z2x&nAk54|TJL3cB$jj)D^7QaLVTno3k8oyQhurpmFTTr$cbzgY(Hy_`6voL2 z2HCQ=Lm1!}iSE-s-`ksx0JX2wUSLbqLP_uEM>HlXM-0e>7S=OG!<*9+waU*hbwtPWyFgdf%oOAyXj? zg+M-F94*BE!<*i1x@-<;t$X=4kKH5ut3f@>;}XFWwk_Dn=>T<1uUB#@d#W8v0kA_7 zE@<>itC40ajuIugA6j@SasfP*nT-Gt59(pPkAC(lE1Mtfh{xvvnu6LJ;i1`D0q>`|~Po05+RfRBd)fGcbN*|z&?sbkuv zBtv%#V`HDckm<~)${htjss7S2ian>(*)9xP=isX);H^CoA5vfmUr5`rO-kc|lLsHd zJ>a!%IvE;W0~|1jnMZiuRLwZnMJMn#)eX}o`LyAK7@icax%v*`3zz)sP4H1Ad&X2J zj{TZh3+O{po8;ATG2Ad8O)n**M!^L2ICNHqOw-C(zgfT(9pnR&N+>=lIlU`aa>#wZ z+)S|8g#;G6mS~Va!(nE~mP$B$$v;~m0aq*tdlP>f>Zz|8ptHZ~kW_K&*lanKPbvt= zWZX>8U`+uQ*J52d^BG_2J+}Q$YY;D9E*T}w7vZKM7ZLNM$HS_vp25xUlhrj`~V_2JmEHq1;BN9OikkvCZcW+2CC$ z9LcUhKcR9{7y0|Y9*+O#=4~<+mC-fxD#oGESFb%$j>DNeMEWpEUVuDLbcJ6uGJw{7 z1{|4c1?rFst9ctP7Pjx81KLBo%ygY|doKlh+dFLD+U}&VTOtUaOXpsDQWEzBQyfQx z$N3;&Mrx0{dJtpCd#ImpoTM@qy*2(=0#Qvt51dRe3 zh1gFCxZuHD&{s`4-+HkbO*rk9Xhmrm(euY*2|ytm)LV8}Q&ISzt<|-x&d(mD5Gom4 zW6zPMHF;S&>USA4`zrNnb~$=A@Cn~?gS_9*AYEdNB9p8+6dFQCO!KQ}7^RlB~QR7;R z>`i%Lkw(adxP;otI?T0xuH;waE8~2Oy2Xy`($f^L;=8BCA<`FnCQ?Sz z5-nLTKhjVb#;%m&<*Sp^o;3aCj6eg+NrGST$X@5SVHPsBEp9*>8>IZi_Fv-={EYC& zlq=9t8Q`=TFKXWxmLkezzHt1(@pLi6lYhSSFQz9&GN5PF934*;S!N(&2s|Fmpq+Lo zKh0wiB9b_rH<&FSQ^~K=a!3EB+aEr9Ofyx_SqU)25}gJviL=M6(OzY&pUg@pdmoXc zeXBfDf1@)Wd{NkVd2LZ=d+~ryej*!rJ)-Oosi?ZWed9oQ6l|Vru3r)JCaD0NZg7iq 
zVfw2Hdvz358QZrW{T|sAnr~N-CkLo+$`zj-cu`GK`i6^3|1v#iq2+=YumgciJEeUR zG3BD|OWdXj#$6H&;UK5-1kf}>%_Y?k4G18@XCdrvQ{p?}FhuNys8wP31a~>5NMEcr zQA0V)V;l8zcb)llJ<*-dMotqr7MkSQT;#((%7^AyM__7;mH)^naP|H+rQ`0KmH+Un zbYib;{3{bb!MDX;vAE(ikDz7g|ugDmZ^qgkSU%=~CM)Im(br=JXlxKl#IDUaT^1jJkT z9h0j;Mhdi$kc8+)`Qy-F{PfEc7}3 z_4DcJR&PA(6U`cM)#A@CZ77aG^jj|2G4wPtrPh4Cl*8HX zdBdjsQg{W=d~T`S3&9mhP9ARa0NL5-1X~KmZa|*biK9ku-I17~nxUFJNnLWJ#$J$w z8nJ4g;#0?lt0cv2W6X+If-8!$t+3YB@L40lZ>el4yU7#nz~20cqzc@=c3)d!dW(Dn zM9v=#ct3eyN+6^Z7CfP0mtV^AaDdA1cdfNgXb>E}bjpN(i1(0-r$#b4^BNnexBMt= zHSVUEv8c9l*@9PKo)wxaHS50~;p19#u0NV}OhozVG@BJ{2VH$6E$ul{t>PLyenQW; zxdO&=cDEo%?}HG1Ri(mt5D;&VRi{J@CH>6kOpVUv3TN|pxyjp!?5%IBqh(haFn7Wb z2_Ib2bQskwt%V9a9MqG+HJ*Cbn+_a`zp3JQdMYrDK{AN9B3!p`56_ha;nit*$)B5b zS(RU479VO=KQGRt@l2r^|J9^9DILUUoE4oaoj-Nt8W2VZd@It`l~{UIITDY7+2D)^Gwt?6 z|LnOi7|c*IpG;FoyY+a*N9U=Y_V&^~v2T+jH^(5Y??m5<@b~|h>Y5{7=kaX+I@7uO zTnlpd34PP`_oDHAe;%`LROFk)cOYpIp~^|4eWXj>xWOiyUM}z&z=Qwoa0OV(cY8Q6 z)-qqTN&vW`R|EJ^=;s*}|HPX9>mEy?Mn=@qaQGWx*Eh8l4=x)MQlXpXbhr!iDTvR? 
zpy6e4agn-S+M^jraV&YJSN#4WmA}_e`J++a`va$ZpCz*i7?5-4iUu4Bl#^eGDdu;5 z$1VFYIygUZiN<8EM**a+&|x@kqCQ<_goHWf=?F8Ukompp>6-gdRw8g*-1o#7QyXJ;oxr}24b;Ze`u%IZ}6n?)A4pG_yOM~#OHu7h#z@ApK| zkyzGX9(QZAz#(y=DleyI>vRv*hNm+3QOitgKa1eDE@@NC$sLiWM`@kYwO=!@to<6U zrn7L}ydn?>&nj{eEOZ8V7Y+Y)|B1yteU4r%XCIsl-q^We=a*cd>&;M-woG+@;O_v9 zj?C`5LmF)P8!UKPDD}?{WXjh@9mOrvmkYGuxJI@Uk(hkD42t7b!bS>_q5JEv`f``H z%9fqCX3cXBZm`bogkf*lEl0C#7?Qb!={yj^ADsl~HSCbo8Fc||AxTt$SN>>i?Q6I6 zl=zu?P=J^fs?nLR?sQEt=D@&5vSn!6F#y6sob!LBHOH6#=a^HmqR-A|vSLG2ID}ID zTd{xG{jc0+pTpok{yiwEa$l}38d6=o={iZvuHC5b2W5I^mdjk)IOBL?%UALUQp*Rb zWe}~pQ?pZN6!x?!qRI+OLcW3c<~6)e=3e67?$e-`^T&t;tDd0e7vU@!TCCj@^shXS zz~LD==M?+<54?;ELB_E@;i~wwuSjQ_kg-VgnRB0+x%YSP%zf@Jf3OMcXRnpLcGg-Sd%crv9oG$yR0o`zOnX0+&CETSOX}|Z zB8ajEG&dY>RP4*7vsKG)V~AuMwpYij&42Y7)kCQAM%dH z{(Pn`O9JeLK_1iq2sgLo7Xi?u(->W))j$J<>g0tQLD_at?OgKGl0+{fL4uBWZ*g(c zxRyt8aRr4&<1-wcD;glDFavvG61CLN?p$NCA2w(#m0l-h1kaDkN981VHj>UVG@`Uv zN;Nh4MEElV2l`;nmH?I8F<&a^1id7-3475gZ{TepXHYl+`ousi1oVq4zbdR$I_~6;Y5Vc~{?Y z)K-h+gQaFMHJ1N{2oSuET%hUTXsn$!vk%_gjq+l*&b@J>s9iQtW5q-KA$@Fgo2uCq z4)OtNO-PK_ac+^7i#TDCC8HcP9R*9;GvVbl!4lUK#iCh=XWldna0o>RjCUTXeQK7* zKHhQAvmRPM->m;R^3$eI)tB(3t@FoDrh$?hkl!5Qc~@vbsDkl1m*iK&BVZxMjY71> z!3=Pm(}1S;#aRskL3*FrgF%)Y;tELWG`($q{eGl{HB!KtAS1QRI$TuLW!S4HJl1je zm>}6~DE?T0ylF2zG_LIjbjlFE|R`|NF1zRJVDN~k9_zj@=w=>4j(^6`_&LgmdK z!5Y4pTOWqLYw2?v5R-~yt>ATi zGLmuy6<%gGSD(4|G*F!j^XV*@a8-xEeaF#;mNg@uH3F@u)`Jr!$62B)_~5GlZ>#RV z{11s@>eE)iF;SRA^L+W?0o>IfH%Zrq=1O#K=c3b!p{G=S`gE}HST>xM9Hmk^S(O&G z;c@x+#Et-eBKu4m2~Zu^&wq06|Tt7OGHtPh< zi->g!OT z+5~gW!*IO109%eFzb%`Yuf5n|Z?CXhTMQU8dEMvx?m#a=SQ|WSDC`;5k_Y6g&kp++ zAxuMyQcLNxL!I9oIOKP0l{ckt4O_)IopMs}FL~zOsezD^m{AD+q)GB%mZnC%&0Hx? 
zMlBTw;clAB<|f6YZQ{@JgwsA~EZ>Y8@yiYHLdC;E7^j_RM}ybj77#m8*O6MecfD#khkt6y!b)SD)2V@`?&QeRdh&9W$f=nviuR@s?*=*R;VpIeH7Ai=IgWx z!Z9F=6oCE(f~Ep>;X)KI0KC4?bD;u8){o`dlj>TE3fs(6KF@q$PYyDerM0K#?FV)d z&p&&U+8aL4SY!%pl<`)7no%a=q<~j@v}Je6knUy92!RG??gD{@p-n} z&PPSl-wuXn*ch%gwes7FtevVk9+#B$=_sd@mr+>fx{}E@T5@teo;QA@$sa7d{Xk~> zyU8aTXxs+@V_T*XCn5Bv>MafXA<3w$ykB>d(7!rh)6+FS-r~njH@jfNbZDh_+(>*eLU9u?>#cL9{n)7*9@5G1Js^zuakgjh2K`mwN69?ZcUm@SG zH6-&*fpdcE_O!*}V`KKl`ZI%sWwHE22bhyKbDp!U$q|^XhmTsN$M# zkiNOJnN}3(Y@3`s0@PsLo#~mnBBE<&smQV=Ijz7lPC=pf@O&!a)tO9-PcDh}kx{G> zGx5;e@&FImsglzv$aSteICM8P?)f6?_-U8^N}(y?=k7^>4}SLH&)4>aadjdYBLR5v zD$2U|`zQ#z@w5|z>24A?L#}h5cDnM^$DqE02+aYFq;uA)#3mJu2&MQ;in$nvYemi5 zMiiq}XA$4Pn!?-Y0_GQ0X2kloBAk!yA2|6hZ5wqi3Q1BzMU?_89Dz^_|KCS7%ApM6CP9*ch7EkHE*uj>Q zd^oLu6?i6ZQR|ljAg9D-kdn;nl6xS}`u1-ih$|n5s%(O4U9NF3O$ARb@e& z($p}CIcdtbsmfRyK+l!Z6dDcm!ao;zt+UCE)bUu-+kJGnUQr4t!l27~pUK9tsS8oQ zy;#Vfju(!M1tkKJU=IcS!xULkJV;qc4&0gfbep1J^;|V>%KsLWqcy&4rKxA1Io7a2 zvY5EhwUNhw4bUk@(AhvWjw8%&Raj5!m?%_Z7VK?`Hdv*4xGg8IFMYM3ZR-;P)fUka zHMkc>sD(iuqNXjHy3xY;LD<#=8`2qD6KN2lD;$x}I5MEgxkDW@^kOc2oN+C*W;Tay z`lJ4-Z)j{}Q*~J(d{2-d%r6ol=yEGGnAD7UIw#G&EUiFh1Ri?_U6e0%^hXsGkhI^c z@!=~Ry>{9|Txt+4Vx&3d$0WAo{dJR_{{&K3eym!h!$E^b>}Mn@+cGl|=QlIMj3X4kVi{@EWt| zrNXD6{JOy^+JyVB@>#kIQwF8kY&G)%wwJr66g+IG!ECtT_+3|9S}Bm2@N_0K>z549 z-f+b1wyC1FyEuoz6NolWNXaUxWVrTwRmzJA6Xx9)taVFgt&FSflvF^LdpS?@Yko@b zH~+{=4B&-Zi8lTDy3+TtZ=e~%n-acE2`r=H3=7Hmn`FvF4<u3(TzL(-nOH)*`x0r3+ZnLRp)wQBG9HD zk)>Gk%3eD7;wK$NP@j-{QnNAR!>A!gz#y~16Sg-@sLgQB=f*S{jSJ2a6xCigAE?;c z)|z@K=4+8mk#V44(jGkG*l7&{jUdr|!u~3dU=yrt-40>3PEt79pmgTv-yBZEeyfPL zJ=w+R?d2g#TwawlXri<%?#yapK+!Pjw*r2&E%Zn97O2~ASMSN}IZ>W_Exgum!Ms&s zz0p0G=xI4XCSX*^V#S;Btk`CYPsmMkhr>+UiRdbw)JwBcfj6BvZM19x!pW_TQ7*#l zdyOLv%?%987K;K)ibC8iHvLyWEoMflwEH~STyx(e zW3tAdVf3v`KB(xZ9g(`WG-ibJuuZLJh@9fh2(j&`%q`5Lr;bdBHLP*c(o)SB1@-DR z_{=h(aUD*<3adG*sP@xPjMHhtm~#5q0D8r!Lm|!SiS2MnJFr7qdG;3OLJmq?VV42E+1=3$}S))zT)2UwUO^Ty=W&=f>QN_`TLj5mp6}8 z$W&4W8hVtGBw-OHE*z=8RY)ud$-}kRcWU(wYmpKD1?KJWxlBwsi8hg2d>-CnMR^dI 
z>J;p!*XDT$Si?g9Uv|1=ef103P8Vh5T93l#z-y$ zhJ_NiJO>pGlTfV}%QcTr77n!94`eW+Y_G<*sb_+4Nlr~zTw_ZOWg50d$W!y}C3i=> z9>HaLUG#0kfQf3t;>LozDoy88yu+^BMqSh$*?Y8(&-UWk-q({&d-}v-b zYkB$>X424nV%~Q*(B?P}ERsM&3puI7s8A5*XRO`w4Gca<1 z2bB-1P0r0*I_~p2*egg9&?#xs*2|b(?ic$_8_k2={PwUYB{fSC`XMUgBHxvXe+%g> zt$l0-PZFxKjIu4x$z@pDkyRaEZd{Iyj;wIRAEzV}-FT+<7t7|Vu~4=O4%?>C z<=HfUH?ir|eEZ2s&+^G^#n|EIu1*5|3B-M_)$MLk+s57vsWQ;#vBma}q(2J#oO!!a z9qf#wDpsrUM`j5oCHiCCP1=4tc!8}${qmdv(18KD@!IfnDcLjNIuiGG>)GcQ2%upN z!z}CZ?emCUqTjle|JsEzV_rC=aKgyp!0SPM0#u|o*1=H6me(jjz&Re)Y`hkY&flim zBIo|8!__A@>E|Uch-lJ^w$ID8T$u^CvQ%yc>}9$}EeLtbqF?U4p%~hfgAi}QvY{0o z$}sn=#FO+kR)P#>L#&RWLM0v%}cKyn`LT8FhD5Hud|=7qQ-z>WWo@hY@B0Ks^P1c-A#L zPpMBk>3U_MpN5`*wp=FbOfJ{!MVkC^#e?1-!8xHShGmhnD*UNI>S+B8l<;_ZQmV$| zMC*}BdCWZ6-Qe`>Ce+~Mmr5itmDrAE{z?(0yKcvk=uU|-tv;@3y2nD29Am>J_L zpTHuhzIM9jy|UIj#y4Z%;HfqhCR(e*X)rANhg=uW>Bh6jB+Vjo8mmS1dF@0e2QTF( zgCyS3!iFUaPj7_Uej@tq(O(AY-~7P^6W$M~aq^s*>QFe!^@IGp4WaOp=qnt@7x-7~ z_se|iTyB8mRB`u*m(L*uJ|rPoI{cJcu^!y2PbVZKTu{xaTc*{o5B3J@tno@0_v`MC z4Zp%3>Nj%fWF5E8{%T&IjC`iiOe|WqXsIu9V=B3-qR)7`WpnK@=d>K#8-ycGtIX`< z2}zA)1l2GeY$pjXs_Wug;7_CEEzVvk5XlH)GSQ_x#HLU#jwQxXh6MIh6{y$^_WC!Z zeK8-@gK88mVL*)$l`s%YRYhhZ_NGo#OnNqh>~sy_GAshKsmk)*md+TTeK zoF2jvgXTAQ5M~r|*llNg9e?BmAUx94u|I>S_h94UkE3N6tV&jVf3Laza2sATWJ%vv zgO-(z4HxDRK|#dgC-G;jN$3%qUpRl_e|*@LO4w3j+o`V-T>^v9k3dM|Nb>L7bS( zMn`IJr%0v9oefnzx!yLDnj(V%EX1SlZAv&G^%-mJP)=RXS3Rg)Ox13>MNJrf+Nq;? z3Y}`zE)Z$4%@RDyC**FBk@#jK0dd{r8R&Yzesu2TE1uDc_oi$`ktoA zpakkCjV4L`hy8NS@&QE2EJJ(3qAF`ftj_bKuu64xPe9sfUuFcoc+9mQ$zjaUni@G? 
z23jlQ5XEC$+x5%HJf7~wxvBKQ!<72Hd*?tc_NowS0xDBp>2EH(2}k9Hok z?m9jsPmz9=bO7k4$RYeI0p1B^(c`)*3-VVuZFk4p8fxxyw>{{Zxk3b86F6(AI~H6W zo-;Wpj^&vJERRh@38s*CwZQeo;*-)0>YSiC4i8?nEO6A~bNx4QxA=n$+#BH@F@r4^ zwiyj@nRQjM0iRH=f^w6G*>Ia>u@YPdpQ5Q=+bim7HOdaL=qEGm=G8}xYJs7qc=@;^ zu|2A#@0ic>&O`Kc)k~uwAfGW3jRJ=glVK-U3YU4#he|^Qoocy18j0Y{?$`T<2Upsn zcEgm6?!v%nt^O(D{VLhYmiFsFN&nj-=)6S8`=^7S$~THulsafVdQzCU6zi6r4|+$w znwW>q%nCC9AdW_+3T_rPBJ6Y1iW&rHEbcW(zHtwb^Q&=fifX8-K&~7Fz5SE}e^`s} zN~FcSuC%Mqmz$B5CEjrGa1c!0jxZJBN@&;B&MmT7d$6Wi+pXqst7MlWo-Vqkh<=9MX1MaC}7!Y6;hA;|?;c!Li2uShO0 zZ%REf9}=7a?>N#hiD2^1YJk z>HyPCBaYvRrouisF5#T~)8DawzjgbU+eq|9a_Wssl{%`mZeIr1+~0{T+1|IsJ77+o ziLzclV*fe`eQ{`NO?M(ZV^YBUkLUm9_wHTRKVCKDAI^Vt`%sv}p(uS$mP`H0<=sa0 z--)WpUe5K@E?kJvJ;b)K-QfkWiw3KT=gecw43FkilZc_^vu&bMg?2N`%#qO4AaSTI z6&~`*+fpyz_a5I6bh&fqN@+yC{dh6S5t7W=%HDzyvRj zh$=QzR9wbQBU+aJsNC-b%sjybGY{lHjjIqRyGR#K~^tLtPKss%| zBAEp~!zD(+1m>!x!HI~%qdZgdK}tGLW{lTVY5)eU`__?%)7g{oU-r~>SBLw8tpLqU zBgXRZgtCz`56jKbZ%s+=*1qxW-^+9FEQ%KHkSHAxT#U=s*a~drAvF@94u5$RP68?= z2B-jB=i1=yxBKc-T066hUSUtv$1O7}o6_X;I>Y$lM&y)>4r+*XlsHTY{K3lFH`U+< zoheMPiJ%%YaVUJRhB^9Z2}&0C`9 zw9wLn!L{5-r_HFxcc|Y;+&iPGuvn{)S}E*&UyQ9mgr_z@@>i|lXWa0RUV4;E`b?Evh4;#VhoZ;ZAKviaiCnT1yfkPL?T+bjHCmLA=eG2|f}jX| zPuoGn??hk3p@|7kfW3nC^9xtzRnGaZ?$sCS=AFi}CRFAs)Y7s*A(+KoH!_3Nh5&or z?i|yk@5-n{Bv@&cF~7iy_%Mp*S85ZkWT(bKt8zM5OGay5N2tuM>Y$)bpsJa{L5G;0 zSy9Oup7MZ-ue~JAuj~77nFy!-ci&cauBwusatIM!H?q~qwH@tFa_dNUopM9vx5#$$ z$yeWlOa zYY!o6H@QadDgu$QWrdRWsl3B}_O!nlB&J$(!ziRDbh-_~J60NI2j>TU41Za6=vErR z1Pv36lh`@?r<+%bU8-i5=Dxb%n?9ttGhoNjOxj(x(Uk$b?K#Be8@R^wXHNS&bWKXe z%Nzp1lB;=g1EPESto^&k#AqR{y@w0-s1D1rH6fTiUKZv<(O6StGd44P?Z|1p3x}3g z8nIJv^ap<%XH2R%Gkw7LL}T}Jy#B9Rtpap*#K6PuYNgTo!Ks`S&!lD2S=?@x&E@su zzI&W~DpZkQf6BMa*1A@oi#81`n@+2=RwN%;dldY<#2ONSTx?@j=-V=X1c_Tz)gW)* z4tu?m`^##_V{h%S!WBkEDsx4(>9Ox-&I5ReE1s|xEIZfO%+=m3@xE86_GB$j*Ot1} z+eubAK^$te1MQ(JLRb_178r)IQwgrPD@g5lT@e?T_lKh4BvD(>cHu@P`b zgV|$87yhqXMvFZI!;Yqwd3~9dewkn}&189bb^ExD^N8R-OeKhplV?D(4fGX2Yz_#t 
zfxSvsf+eEQg{bu}$F!-jFnb4s^7C-i;d(QTfm2b@6A@;krZxkN0B00{*oaqX-?FKk z(CJ5xfkpMGJ_r>rn`k&b(bDR=Ku~Q~-GW3T-=x&+u4oU2c75!LI+>hIiu|BEkFKpCdl`3w2(~+bQ;j|3~!r?h02Wgikq!?CZ ze#z7|RNBH1sa;|mT-vDR0f1!@T(Fd{4)rb^`2JKJ2izbiVmP+`Tc=ULt$O2V2YQ=o zRZn+p+tV?jac84Rm6C_m8?!sQ(Y#)X6!6qYT!Ud78~$3ase|Q2SgkTHLIVztu&Id2 zGzL$Q{`#GW9wR@rMEPr*^26)ReNMi|(6Dtpg$k?y9jp zLdz?0^-+C@C3@pV2uj`Rk-3u2?Z>tZs*gF-+mqf8O{qO-mR`^7b|-!P)ismPE*esP z5R02B!DU?|$jAs9osnzJ*eQ`IUsH7)lc*buv~! z+PUe`CXWdvcnk9J@xtu(nw$_Y%Dp0^7IgW#H+E|b zr}paCL=JBrTWyYv3P)4)vpcSSuEPuD-I>gja>;I-!<8GlD-9Ym@u7KM;$w6C0zCqB zA#;@86^_+)jgJ{3V?|$Awhyoh@Pxbn+NQRQYj7K!4Ms>8V)~ES<>V?>4JW)x#_$@&m19eNHsd4)t#ua>>!J;JpeJ^!g#1} zr{=F!M(ErN?*qr_iDMu(vLCkjMjqXhy>h&-?`j9Hxav!^IQwE@<3VJAHgfKC=wfc3 z*JMPm8GCkV?%%X~4=Q_Dl>2lR;grc{Z(0abT-r>QhZ&p=6?;x{ycdAK@fMBaA$73Ce8&$-d1LQyF*qeNNQXu9|kbnG>{;b4E*&L58 zju)6jL>T2tyJxWNawF1Fhp5)|%Q$b@6!zH)Z6Dlm8!6I!o=-jny z_-HlK#f@WIYQ^X8`F@SyaIFUS`ME z2g52m*S&R=t7u}bC`r5LI*>&Zzwg=m6RTFSR>U1~lF-B%3!&_Z;X%DE^^z4I$;i34 zSm!e$Q-CIldv83gNknnwu0ZeyL65?ocq1=IwFwgZsp%$alyPPULhO13OEEP!VKeUa z)Gi4Az&ImLMwo9PJmsxEV2TQFTFQavbj^ZoVB?)b>fEtypd)}CWyoxUMQ;`y%MlnG zCy+n|Z#$TG&>oWL?4hr)e6B2eXqna`>&CE9oH6|=c7UJ}g4&lJf!LHv86>ra@0Dnr zIfGQDIE#)w+4ORd9iMs@qGGMDYKf4N8$c|S7Q_pXx#=;#6Pfjby3&p@9Q)#5nz@X7 zLx_LPOZ}qdJo;f&-HXvdQHJ<+wX|;O`(d>d(RR&-7UQ#_UOjni4P>ZWKXVN&d!k>BmSf?ift(a*k*uwZur(#GtxZ<@+H|o>$iM zd&qO!;P-_Yi|laFNsrf&(|XC(R$x^BDF(||Nzd;^^{C1i^i?=z>;si+6<^+E|3`Fs zcVcT*GmmUa>(%cMJ~w8t_$P7c@T$+~Bf=)LkSLvlG}rMG4Ig6v@UMQ4lR(gP%Y?To z;reGD?gsqN+q%Oe{F*ii=Q+p0vLUNylP%w4efP(rQa<;2Po$6Wq;WzQU8+}A7Q0GE zK&_RjOq<~Yi(OU78KHS>25NraJtD5>hg)^;^@-_gKCNe&+s+NJ??DXO!|>gmB(-@N zDU`!y=;jL+?~=^DiJ}^6vr$CpW8OxCN}rZ#!Mfq-753E=p}`u`EjU@%9h(9*ea9fD zo9{kn@^$Ql>xAdvVjRBH>|EGBQ)eOH0O^m00n(4)k_90-cOE-q!TGMEb%`_^HCYbG z_{W>mJjzQB$o`;~2vZ^P?qGH;r@{<3)dS~K9f7w*Bwn`mO7q?{>mL_*ga|H1z`?L$3rH_rv&nz98 z{>5p5XOXFT!PTECGy1WZo^9tD{uHW5E&5tLM>M4$j$EsTaxVLQY7Dz5GjpnWh#vkm zd;qY)hMmd!21UQNGMOdklbzgd7Q=i>78Bn!zK&V0m}Mu& 
z?p∨N8p&UJBswfyJA%GVuLsj(or)Vd^dviK%ag3^iK(R#&$eTh^!5Fzd7hPw_qy zNX{pC3=&pAO(_eI&@2BHt^HpgF5t{7uXNx@*Sexvq=T}KMx(vpqS6)({{uZ)57%>}ph-aM!EZO}4>x`O&CxN@@ zreoI~&D**P5_V#5*zhA;jpZ0Bndr>?)O_Bh-nFkv8ha{@Z!hz<0mzZf>ecf|T-LWQ zWarn2E)-|O`DR7|;1pfmi|mkf)qOqxmCSc9y73JcBv+>0mMg@cyS`mp_kqzSWhRGL zrnX4&q&>r;Tv$a|YRLJO^;pcvi+N4+D(k72d5BYk0pXF*JC{EvjrBbykGx zjMb`(0db;$*d~`#YXAS~BZZ3lyISr$)~+#*ltoSEp2=*L7ICvDpyoOX5TBs!l6!i% z+roq-hjDj~#>c(~g5zKhHXT)B{iIZfFD699CVl!W!|>Dtvtc{mwMB(fvoDb9QoPW^ z1>@e&rl00S%=5u5NHMc|OS}+CSkwnj8UZ>7WrXE^OwQ`R@j%Em;{~j+<`<0o4Lk80 zr?yJ05*F~@JQYlp`5hZWE=F}V*8=O0$6K@ev7oKn98a;z$L4!OY9=>~CHI(ysF-fG zcsPm;Y(th$#9`5M&1FlxV*Y!B2XG=wU~lo)m)rn%{}f=0ulSh)Fy=U#=ixDWJmgDh z%IlJM8uh3~vMeR1Rn3)5^o%KU6eGYNTdA7|Q;yws?`2+k8nwvxGR-PlbjS|Lvw$ef?m{h zUY{*qQ^5$UE}hm0BoTp&&Agx8g2ei-O8{A5_w_>d!Wr%jk2u;chW`W{wK|mU#x}`D z3nxk}l7ZXFU~Itij{@&$9>?4wcP&cvs7AcKIiK}D6q=Sz` z!Ad*^qMOkvyLypb4)h0FnoERfI9=_r@(W7-asR}<=QUIC`4)vA&uiYkd~8ubV?Y(* zn)9m&iBtBlsnf;2L?p(o9m7`v_k90O(2#N`f3RRLne5+iHIa4Qf*=d03TJ zB&u6HnkR4Y@iiJ@!pt);P9Rs;zxme*krk-MU_IM41|vp-nX8z$rf>m$cM0;!W?2yT z384ms>@oL(YnP5T5f;P_v)`^x+|Zv0j{e0ke%s8-NO!G(*m&1I78x1b#MfQ#F+wuv zc1!*i#I@0u1DHcFoI$*!Gy+M{t{tE>-UVbij-?aEVFpyUE4ze+TT+jzSf4JMzK8J- zCfBaG(0@wNd5Kl$PeyTu5riP*p02|!%LNxSwsi8slgccKvZddr?^ux_*_1iSjz30>gVI`*}~%)q7}T1`@RY5ufn=h`x=r)F6qj0GmZ3Bw#!|t zgfZyLH#y&i3DEpyGKh{k8Cyy-D24yX-GPH~EAZN1sz(AsxjRlUQGuynMR50R%_ycD zI(}QC^Wj*qnSLfHvk$FNFS%3-qATeW@)OgNG~YM~eM95ShnF9oTe$P|(#h=8%^^Le zDy|%lvmb0PrYw%xkKfUDUjWtqZF5(^mwkL+Uwrx3zoEcr~{n?QR)k;q%Tqy+_lD{{I6AXf3EK5`+&<2S` zp%`OAqTasARJ40Z^1|qtw#BD7Q(+rR%fSq+hrk9EwRmSt&ywMX9_xa~(^eX{j7GX! 
zk!SpOM_gF+PkVpW5Gg+WTN^hZ|M%M$r~e;k>MMya|GYc@yW^EB;7Ppv1pC6Bd;U)5 z>JpvTx=h@dbhARa#ZNG?FG<3W)b~A(KlHyy*IiYsyu$x?k5IP6mn*+d4|NH7GqLU5QLs?hV--&#$b)9jy0c_3tSO4kB{ntM^2{>MY=K0>}yAOS392(Zg9_M;KW}|bMa`K%j-_mT=QOwL9o?>3W&7cuGb6sXt;0c>DScA?x9Rv zr6QA!qs>))9b)!y*er;a_fp=mg%*1D=r+fnogn#F*YaF}&?!{1o#-8!nSOi2%p;x- zP^|Nvag2Ty9tmxZ&0!aC)e=(89HCLwQ|VmZtO#v=)K$SJLU*IfFE%Inr8)ck&54ZH-}Z~S zmu$mG=E)HKZ zCI53mBo7~$3-QUgHNn4`Oo-BXzGpglgFAcOf}|)CMvW~h=6m-~P@zYp0U90vXI60k z%B9@a(CVAh_!df&h5~>y3xNkn|AqK{!0yf6=A!nJX$d}a8O&5925@Fmy#dZFtNYJ= zFo5&E3l5Q=`0zC7=z7A_+EBru&|P1fq&&ASyF~@vB8By0L}*F3C%(~+y7zkysu-3s3zpwx`&;$EJItR`ZkH5cf>IcY8Q>Uz`Xn^Ep9WY7z-RI;HK6_Pz8AErylH)W!4p zU0dsN_(JUhjrFQ@%^via6^8uMJ@7H+v8Lo5XmJOx*rvqDN^8kNsEfw=(|`~g zU+66J&=Px6y4y!zi7v1*`!1QMN4VrucW-s7I6;{eI3mAjbXpqJ2dYnRN!X$4q&`^w z$@=>Hc9kIP7D?a&L+WTLEn|XgdZapYI!kP!8{~em3$gJ{h{bcc9U`%B|He4|=Tq-} z*}~}_mNSVPQ93+ePSCO-MTgx-F8~qbeex@3Uvzvjv@Ga12GkU^Ybuppy>~}@As_5$ zpZs*Z3L>Qpi+AG4YOF-s;?*A7ajQ=<*aX&HaBjSsDiO5mV&CSXCD2cM!q#bB1klMa4z z6gQWc293-t0(3jViWrbVJs35o&Dzyss$iHuR|=g1QH6uu#jz2bM)`6#Xqp~oZS6vh zI#`In4^zMNYG|Sy{7BXnU0bd@34v_^x=#0QRn(mm1*AXix|j}cJ*|Lzq5!|nuwF!A znv1`VPNd5h%g;n_G=3U66}1F^}&yF&V@ZUcjv(y zf5%E91(tPb?EyP1Og_<|^P+-v`SA?hu6L);n={2yZWJB#WH>uR=}}Ty=kuDf$6*2N zy*>4irMx#uFRw&Am>i^jBdvW_?ZzG+xdnnmu#=4Qp$|&5rjvFPXk6&lD^x^cw_|tQ z{X2E~FSih~U52EZ{$VZS`Ys|p0Kpga^jw7;_bBKSpH5T3XZ86(%|GT>ExHRWMaJZ;26{5PP?v`K}EXM#-Sg->xxLu)OduxekU zhWzX_EUj}`R&3}49Tu>l6Bi1p*#i6zo~dk;H4>S6t8 zt+?kIJR6!#u8c!u+u@|i$M#3j{$Kv{v-w~C<(l2Vw+Y`&wc$lv7F+9Q%UoxHpS7DB z^3w*>FDDY(!xqMl5OU#dvAAuyxYq8t4?-(zX%Yf}2R~sbBFkh{Lyu;U%Uz2F&J+L0 z06MR`i||2-$uM=nkMhFQtgk%zv&}!I8&WGtGqIG&@c7x~dJBGDNpEyXJV8M3I$_Ns zOZFo9$hIMhM_krY|27+|Wum41+KiQpM5WWm8AXyz*>3JcgtJP%+oK>nclerG^`k9z z{wEcs6?a};i~q9?GvEL7wKRk-4L?;`Z^o;zWF4|se{jcG-DZV{w9;dE*)}^#RzO|m zFu7+~Drx!7EF`p*x%`2bWzU-k`6p31Okd5$w>)b;={5-;eD6vrwv?nG)gqrUEzsHPt5i*O)>|wbLpG5I72;r@wOr=rFGm7LH~iR%EhRK+Eb#!n zXmCL}$2VaNGALqMt%W?BM=}LA)V(_Xo#>#|bd(iO#i%Nw_Bct7b znPHWQInP<J*U=5*K+F+w{&F1e34CGPw>YaN0bS*Qd^KmD|InDx 
znQ}2oPJ1pHk%rJ&!B;zL#XzROKA-L?5D}4^T&_lwY|x3h!i^|}!&7Em>XOo(ws{Mc zT(P?e7zYX}S?A#@oAVs(&y)|rvJ$&tIoUpbH-b$FI<9l&ThG=iBs$j@#(IvMXnIuS zilQC3T5|eLIys~ocM(hGKl1d7?`SDppukG?-90Ki@{e)5Ne33W1DEIFdaEoe zCHrJAYF31kgK7)No~C_V(}2pan32Yvc$0A$g6I-Gn`oNCi*}*@6PClE26X|y9OM;F zc~~3VfOD{_yrh7&|I1fh66(_CjvgUmZEFFL&x&EM2as+R=qT(kCh_vZF~qfx&-SUf zaN>`ikGk8Z~G}NK~ff0UjL}bWrlY3)Rwa5 z9L4M9DQd#f8!b=j_$Xbg0(|&ilY|dvd&`@63u0aFt9zI;RLvT%1qMf_gNqM&c_J|^ zm|QKW`6$0{;?{kIqU5G{P77r$I2=an8w&fx_x;StUzoq{=jN}}FPBxMKY)<^yFYHp z|M0p&-IGO<>9w};qpw@S>||kao~nH7r&r6$c^iAN)7eo%b5NJ#tQSBq5(u44%Pu{6 zo|W5GC!yl^dO&W;YRKVxUNpK6{l1JVhzwJ-O&&=Vz*;vO`NS{g8uHJUZ%}`oBQds8 zJ{LW?ITY}mcQ$tk-<$<${t>Vwj`P1*%6hWr=R0e~IGoaHKhiIhaXo@yDa@TG{YCKJVPH)q*xQhz5ZJRcf)~n3nw7m;*1{iG^<->ewOFj z*QqrbV})U|lt#|f0yP_w0`B|;=R+O9>uxrglpgufthIYs`4K-^sJor&XKQaQN@pRN z;I|05?6DcCqJEGkJs!D3*ZJb^|9RNxB}vSm_vL?c6prtZBtAGhV{(|oVnOE@@5x?e z(%#aknEQSPzZ3?m{oK5@Kgc0DLlQxBwTEn1QBH0E>_P!%V&bB%bT7q^JW&@?Isl{> z18tIr*<$nO%DD40z=F@$GexL9%}wX%wnQv7Cm=h4*+kcrkCRPTEavUz^wX;@Iv1<( zz2-`X2HYnyN++O5^vHPo+~tz(qIS&Ep^{!`UD}Ji?t{P@qRBPZxLD!Tw|yXY$fSFz zLHzGTY*WyHiI>z|1te7!W|1DYn6EE(9gtZUhF4^&04dhuHQRZo--+A~oJtfw_{ipU z8i@Z+bdv7P>e1Oj^_=fc>0l+I9hJB0(d^5j{oKljGQ{tn`%BKTZThmA$W+XDz?l z$6lbU0v65sAZJQLBab=bHKvdaKY#Ni*NpXzZY?4tIvNon)1(d`f0&;tdy3+7%{_Q` zAGrTrqQ6~4*HJB+aryfX-7aNTeb;8Tg*pD-JC{}6Zg=&x8}g2bh~-}yD8O7+p4v%y zAsf~gqy_>_l}6FNbKab_g1w}bfv^8Fcn zKmKgXq;gc>h0m&`&AxrH%ed9%zpEB^(%R}M5N&-`0QiW1d4YKyJuI>F5e=-dIZT6# zV?1861<7b(%$$5eaj+5jfGpXx8M`d836jrqmgP9OUUoU{Nidi`Q8YB*dbCc>!o+*i zO@RE%XjznUO;bn{sJqI5fm&bT7mgzr=r5(byt;HD9h)xX?E%b?5KlDOO)Jy`FIE{< z%hh7r4Dmev^s2P5zA2LjzLL?##?Hc|QD>~OPv3Q`GEbF>e$8If)axF9NYf6Y4S_ik z7d%D%aB+uqI^Mt9^0)5)yN6p}i&PY=M?V@$;kgwPToU&DjDo9q!Yu~`p^i)DcgR?| ziz3j~D5`YqqWGqM~>z_O@M83{K6ld{`zXOIT^^VnA%s;=+`ax82 zYR9Usi2|vHT?Ei(sqjA6J2hujEfIQ>QnBgsT3^RXezyEqWa}Qc+1*N4!#!cT(C${F zE#hpiHH=|}FNFI;xQlypZH}gm?3;X#38_4k@6T4k(+2@hZ4NkrHcb^ywiQFvH1Z&E- z;sBw)end3FqWKAf!A+wuYn+a#S%0GO2T`dz1*fYyH@-&HWmjr0^`q(6J-F()WT5n` 
zd74H*r~=84RW4XzD{vDcetmt^JToEkx@oWJy=Ma9eGzpRjWsx{BI{{^I!P-Q7=YPd zEeN$%TCcOHdumGG71vz@n}QJyR2W-Ww@M|EmMFeYWK$05EW^O6VyOePg%^k z6vXa0G_yL^N69a{RIGT82v%;22X3wpn#eo5!CS`F$kM_{a^9fsIWBQl79%{O<99i{ zM-^IEyO5wlajPm?Ia=!#mJ5qhSCIzoVtDZ#Nu!p{=#rAO58LRc!fH;=vu~BBdlWmM zxZSrJ90G<|PF2K+N{Jg14hW@k0)u|+gI|aS>rO)T2G8pcj52G4vVBjG##TJy-9|?> zQD1M~-zGzP`DVy{i)3uzAxl~FN8l{?t=`$Vh-%j5B@kPDvYon&2 zm-@8}a)GjS^;_Q)#`W)F)z*(>D+S)vMI@{kk3#8U(#v{#=DjSzF%Xfe0FIPef|_k` zyx@dfPKil_c*SYJ`5!uPA=clhHvQ_`=x7Y~4n0*~2k701`JbiYSack_+7I3}?TFVU z3EugC?VWX0RDZwku|N?l8Uz&SbZDduLOO;V5R`OC=>|msi2-SbA*6-|=@>#fhLDB{ zkd%<_^ls02e&=_ebDndqzq`(}*1hYv|8TpuuKk_;?RbA)?}9K!Qg_4etCx@ceN|gL z7zSl~wdP{G9dx-mv^Z)k*OC4Rch}Hc6vxGlXKm6`7hd`usDHZi^F#jY>-WLHf2C{q zfAV19BEhKq_mIWpvx6%Ud-}dt&h8MFN}K7*tnYtT_A5nioC6Mq&XxcBNmt^#jW3Vu zea||uZkr_`e$vn7kq(cqeKmVd!0UqDdz%j)zYnnSO21^H-tXi$EU`E>2 zRLOx1Sd@#JrBe_q7Cz=%cH1yST5e!RHvk$*T9qsLTDE{)FQlqHBhE8R=pwIyA8j;pc~w^xtCc7 zH^ofys6RsMg4$e7;KD^D(L*#WF?`&4Y?GUplXJhz4=N1a%=Rv(a$Q=v36-MFi#VBJqZ;m^1!DF#l*lC&;YDQ1JaIM!IY_Detntg8|J(Xh~{vGQ&7l0PH4kv0`yrAv_Mv|Dh=N) zs0jBg7nkrHvjFE9iBI7LdfN6fk}M{ssi)$hA7s|aRH*#cA-zLDSUgg%Yd-{D)7-Et zj`Z={2{DXUHSB(%6gJrOo%=$}cND|0gKuk};#8G(|CPBjV||-!9jhk(kI(W%wFLlb zKp2F|A$ldDB|}_$T-w-&{_xu8QW~Bt$|V*bvpwxZGkC$(wK#AN+sd6F@zBgU=Ys;f0F(qm;FLFawkCRY-YZVzmW|%8$6u zxKF*yeV5+cw9_%;=e+Mef2(HP7Ws%jFHH%Jxs+&T7g| zJDg|oz3!)D`l~+16=;z(3gpGEOFg46A~x&MCQbzb=eapA40Ok2<37!0xR}EWZo3ao z49_v?VN7o%>$j7ue&-Uu6hqN1lz&Saq>yh4sEjeQirbR@c4@_f$c z{xdh>2qELB7cZRiBOJnNKvEx>3M?ZJq@b<|NDVS-Y>25V0WW4} z>1EZba>aFjrgFZ)3z1ug->K_hAbYGUjw~py&bs3o9Wv&PR=k?Ee{yXA|I6*rX^iQW zfvkEZlhqf`l^t?#^Y6Xb6RSxqn1(Ua!1G(l6`EnXcJmj!F`Y#dX39pJZmhOQkXLhn zU3h5kBdZpTF{Ba-z8oRZY;ZNj5*p|%s&2V%eeC_^xS_vs{Q=e-}?st zzw8_QyYWkHlRzl)n5mY{ui?DuX`C@&M+g*WAnkg@hp46-m*vV_51aT?_+FU}D0frm zvUoS;r0vC5L7zUMZr@V`(+#t_x747oH-EFyb(=x^E9y{WCMqOfA*A4VCe!f93@7Lr zpz?$&3EzB~H)WX`6%E8$e<58a(l~c3s&6cuP_{n5U{PoEL8n#@TT})l$6DA6ukUi) z9%P7hV-M}kiNO!!xLG75T2-1To2>|Sgzf0N&GiA~D217>@+28^E?^uwLE!uKXBGVO 
z@}&uWkMQL=8Qv+dVHP**DT|MwHv8GDE1XSCZ8*;cH`Aj;O=Y0iCu638Wwwa z8=2IQ>ni9Pn0#_T8SL5GPwhrHePBoyvPz|565x;(mq@=?3C20Q+tH(;rHVKfmrj{Oz}V@Bcgb z-Y+#m0s?8y&dlk1>7{1+_u{g|l#N(GzHhFrkrFM9>>yS`8JqC5KDD?cwCINp0ac!v z8m-CL+og6UWzK#D)0Zyx0shwx*(!P%ulxuXW1Fd-E8}u-h8EeOETtWnMVE{actZ(- zW6vk8t-#0Gz&zJ{=lG2you{X6r6X5hAAtEekRQlJrk**zFnLZ;I3gftACi4FX zyO_&?PHY*0p{}*nMleXY_^)eLnFU|1k)=>CVg1P;1BlzUSsahq^sAB6A+V%M8%aH! z`Z@x+iD<9p-4$@lNkTDRW4 zIxBp5jvSr4LqC`mIMnxvmDKo(O6WWQ>c^7F_zrK&*~eyIH3mN8L!tHOBIXqA&6n}3 z6s=7I3Bkw?Vq1*-z_!`MKD!6PV-H@Y!{Ll{z1^EC?nPJRq|F86v${M|tr-bOrz6!R5cQNgv4qoHznbvKGLJBUdVoR3kF8 zRA52pd!7u=fc0TzY;(yx`M#p-d+Biey27oLFXt8iNbfwLE~D-92h?SBRBZL=SKVfY z{<~0JHk?K2Rs0}YD)bn%HsW*};u+FYeoOf;h=D&|Vf`bG_QMx2vl2Ts*!EgNT@lYA zIn;5$P7Q%T8p0b$j*W5-yD`H>(YmMAao|{8QmjKctb4GgI7V(=>dG* zRCV?@y`LB|ZS_XAP=_ng%SiioGc`K!yg1164%1kYnld5528vM zhm|{CjZu7R-yN_hwmD?u#kQemj`M^DVLib1i%y6mJP#fQ*ewp}2HhGK6t@ADmg{G7 zNeQ8=ZaPpmjKHii-jbtf$W0KekQ{)=i7zjo{aj9<`1yZJoPa=gt>Mf~W@_r(%O3>k z8+td2SG~O#f*Tvo4+6v2*H1rp)&CFIi!k^`*SD0E29mnB3(_gPQa3%nZJEwT1*FW%KEP z+m^V`;-KyXfx^Q-QhpIeztOhf+}~3C!FzwZd39Mz1>3dl)kk#p1Yfm2sjy`%GS<7` zuu78zRLvIsFM53orY3XyHFh-0p&yJ2Ip>bUMjv_A>RS%@Sx91vOheT)^y6OtSv41H z{z(GyCmZvDLQ!osQb1IQ$yRmMoJJN}wnoa%&Mxg|R7lFC8W0u2RuvD3bU@;??K8L+ zbl>p&Zw#=Toul)4xOLIJD&=}Fcn{c3{F@t4^?QI*EP}2-vgI$_A0)4OLJk&sYfDMj zg&1x1KRNWVWMw`}wc^(Q2#*1zG|L=-clW_X3P z2jUF|pedVvt>mV05HBwqiS}vXOd+wwceTZ{JZ;9CJ~Qt%+?3{EMk(=>dR{K3J;Zad z1yTYQi@g*vFRkoU+B$1lIYT+qye3tZ+w9;fXoCW}I>UH-gSg!cl6GAA*B3n2K6&1i z1*TY5JWBI8mlX~;V0{nwZKrd~N{0^*lgt}VHnJTaqtK+!=3xtE!Pv=_2m!Quw_Yt@ zJw#Z97-`+N85|4>UAK7-C8JDzjbC@q+PN&AC#k+%Yb?6+N>O16aY0rb*0U91ipYq| zq|DeS?1=ZLkahB?Gb%Cy2aGIGC2ze)rr`5v%ogki6g6Xu*DqV<9%;fGGcsT=jl|;A z%61e}8;IMA#gs#7 zi0=2ehb37en>qZXWrO#SRsHamIVhq^jp}LudkD`KGv^nWw)zN0YcNJ?aXuy&RacHn zz*g>iwc$hZCtAV>4pU3a*?W7;5IUvGw^_|}?WXy25_1uNyA^(j{PDyzsblaD0?dpl zcCnXgTg_n^h+J4ovs_IU_QX@EP|2dr$vEEuuGpLb#ZX|DyC|)14H>HnVCN9VmapT5 zpC+yiqPjm-k%8i3+U$}YS0`o?urbSYRUY(fcr3YnMGb7b6X6CDf7T-A+1FRyJ={Hd zFDM_&-+8-D1tSr!oWzX0;dGymYC-(~X?3MD) 
zTrPb9Cukf2fyKOjS!qnRx#%m}!Fr5N3@w0QcGXPvPtPqA_UP03PO?k6lbBcEroI=9wfEsz026z*Ubi_h{1)^srDylla{*AvSm>ke7$g$_3!5 zo*{v)cvV(alaaWTS6Yd}3s1XBZtmwTIYF=vxjbACHd2mma!2dW1OS9}tuQ0Z<(Xq+ zU9EaGWz$UV6^Z_cin#U=dcNz)Ok`7IK*TwA^6 zK}*$|!zhe?lf(TrZg{z_zIWYDA05(yi@cqQ<1=+2NC~np7Jy%;YL!+^*bT_#@=~q! zd%=7u ze%H!wZ?MF`?MRkXCSxlD+X%}1FVsbPK9!<8quN^dMnS>dH7CEA9cmhV*}YFU$mBe`#9 zSa%pz5AKz3Q6z345*3>jm<|z9ryNDWVNd)n$$Ds(^GNz+TvnRaiPU|P6#uSBpZh{W zte6hOaq4NkktMXsaXzE>Xi`DIU8RRlH0v-Hh@3W0uy3YdWRiCbRrKgN`Prabf# z*Q5X&i-IizwUiUJh;WDm!MK;Os*@D-_vFF!GAfB|R@PPNa5Cm$yowOPIYTdD%_I(^ zZk_VmB)csatR8n7>MHk0R@nxePUzkuGF*+A4Sj{>V$C+E!n1nHqrM54Lc{;+b~;1v$Rc_7__)Qyl>G;b^}d9`y}^iT1Uk9U17k z?FBiob}_5`PM(e7B2+`4fT?|%hF-Xnmn#QcBbrS#!gTbuwJPx$-lyE9{LuY3Zoi0j4{gF|rJR4W4 z2ur^kLphq4AKWh1w1}x z<#TW7oaQ#{@P3JJR^>Aoo$pFi=Fk0juwB%%WkADfn;Sd8$g$O*9%7%&^_G3Q#*JN@ zTt$G@u<_DrL-;%w)M}#Z=^FjZM!wOl%J7W|`ZSzU__=aqT5fyFn`(PAO(5C~I1m0$ zuD>?-k9tdD)*q-Qv9r5jKF0_;*>u%zbXPz&f@ekbW=8*hn4BDpt~A@ ziQscP`BkstjP7jK$J|pDC^O_3Y40STi&?kb#$ltbO=Jz4Q7pMps%?GO;<@#CxX6eR zW@wS&k*9gBRE7DyVq58F7{La|ESSXtI+9WZbjK`3B%;dBIU~-jt|bPMl%<^iq-ZY? zeni*6|s6k{lZ(; z>?`*Z*hY%jb9XH5=sPG8btJ4!@uR=FYM|ESU?8@-9iXq$VQBbM8Zeu*x}UEpGP`wAH{|=^C*Xl-f_Fyr0>qH?v+vPEOGVJ&P zw&VQ@Z&w);-6MmW9xpx$expX!@70qr}Wc2 z>JdW`*#jeu{D&5HaDJYP1Vo1LN1KIQiit_?d#=}|F$8sA*H@{b&CC8D zf$jK9+jz&?MfO_6xuWS-s6Ef^sGo2x=WLT^*?2`dAQQ@aV_uQ5d%cdPUKn<99^blF zi|~jIy*7;e5aCqbX7z(0PBW$rtx3GKG6kJ-~W6Y0w4J6*!VPU5-ky*~H{VixOXPBOsUsqXdD zsqUKP+1CVb`)SAuTE`T2w)5-cD&ij;>DD_{h5@t101G=MU3Kb0qA>}saW^Yr*DdDW zL&xtTthRuEM_YPQ!~Xr=r>RpU^p|M0>tD%**`CxVE)*$ctBYw+ipXFmf~>?`2iXe` z9Y{oOV*wW%SkFAdo=09@ZQYy-3>l4G1>mFZn}w1T#RX=;q1i|G7L#gf%SOhsd)NB? 
zJjC0RyS6MQC1a))&(0NcEf*5e@CjVEm#srovt&HKgJtV^wK?mb9_o19p$?cW$x(TR z$8^0b5RNU>oVQe@%@X>0oG>~J<6YYd@Tzi-3^CdSTlN5^_xg-CK51et;=K%dSC`fF z@0M_dP5T#H>m)O)>n#hRLnb?#!GnWy3IiU_77B~+CbkEF8a`Pb&y5W&N9xwvSz6u(RF)Tp9^)uY8gk;YH65gzoU5k#G-(b@wnhH0bw^Ui;{KV_FMna@BmwD*b!;2 zfc2cB%$>B#0ev$VfeCdZmiWt{>@g>`>*K-L;tZIi>BpV7rb=F1R(?u*KoH|9*@1AU znx3>-70Y+=xvcN=H(Upd>x&WtI6~~Nmjc^^AeA>03t>Tl?3uiotMO5;7}oiU&bVRE z@C+j+;|4{386-pqIEm$i`4l<&2^$GuV~N!~jO`$-mauYC7%#6=n$eD?6_49#+J) z8@sMs##g&Ygyu-dhcP3t@hrHB`HiorL<10eE(-uwEL$g`i|HdV+Vln6bE875+o*dB zGooCs@X&$aJc*{t{9QAAvo^6*Dy;#{B_!InAkJM0oybHh@;nNER&^vu(L6?YQo&wl z#Vc`7OUoVad{FYxEZ zWTl5Vh(G~4EXp-TG2_7M?l*bA?*w1^XB^Q#2sF#dNX}>@GQaGQRrgOck$xs%WqAFc zjeq}SIMKhQ`xodT7o-1*0Q0=UA8Gs#V9!XI?@7OV#2eW--afXO8rmrJv2~TN&p zGwiHgxDiJ3>~NQmN2GrZ>`Uc+#c@Zg4asATReRc6Lq|CMy);8!pYb+?$}HM=S%F9y zJ(l2O(TCpZMAX@BS+JvpvO@rdqR@9bKO$N!@8Q6_rzyq37(?r}XLIJfaWga4&7HlN zxK9H-;yD?fqOmG`d75Y!eAZFJEtu~)M<}#}$I9x}`{M?+a?+F8hihlAqUlqgj>Z({ z#pDz#;T1P^Q1v%`*;VF;!(4_ynXuB0Vd3oJi=;DZed#Sj@o0 zm58vFh%k8Q(&dhOs=M|ABGD;Al=N*CKhUbi@Y#X5#cRLyx6e|;SyL1Ca_~miRshfy z&|Yl)h6mS8k7c=^6A)R@K(~th8-e|4LGUrDV8lU5MdJuh+v-@8ahZlpBFAZ9qWp;- zW@X<5i$)?MCx9=N95HWH zNx1@4+0VtIM>Vx{a;iQv+9ZBo_$q)z_(8yOEBJ|~PQ}j9$pw`u|B8?E54@5#S>x&< zlYPZ6UQ6}RXRl4;8XK@t(W(!QoHR4=9s?Y^Q6)b0xeZ+U(&cd0DU?1_J@vN#? z2O8@){E(R3XV&82qMQ3Ky9d<{yP9IA`WR`KR0m>b4E9-<8JR_jgX;n|4l}gN zoVEByGz6%YgJoNXcYhG1yn&2GFsu?))DLGra^4pr`}$r?mwKIH#^yzEMqF^a-rko_ z3EXz3?`ARvtVgv@OTetp01}nmW)-y=;E=akygs%;S}}`N75s>A3Aox`pAKVLE6I~%Z@Ci3bP@7}@8~8Jw zkNIVA&)>kG;5e{@m>1X29sCt^pIlSPl-Wz<4Vo*h;zHSaf|8t}VzlW`L}l-(FWcSK zS|G2xX4ef=AjTkWP(d6|*Cqfaql#^7WPiOXD zQ4z}7%TrQIF~pNo;c`_xI^R4xDr(JFaD6~j`jj{dpi5c4pS(i|1Zw@oi;@4BvH4E? 
z)2Z*K)(2nOrc%{ST!+TbZeFaF1Gsr~#_hg*Cr5875#RPX8m~57tf}QRX`vo2E{eJN zR!e_!x2Yt-rBb3pxLvWn*Z`j#d%N3e4ni8OO<}P*rl*hLPT?poe{QnMNr>;+lSu(~ z?)KDWB#oh^PrxM3KM2mpv{Z|VTi+`|SqxjgdfMEmBguLl3l~;14MV&$Tz8EK(DKRx z&r3#o&z2R6z}R3r($A*I2rQBfX9)};o9NM<$P&$?SD!+2FP2L1yq=*xByF;7$!58+ ztWxeTAp2h6En7)c7n}^F@;JxX4GQZr^4#haUBh&{j;v?rJf&AO(2P@JwskUUm$O~wNJq0N?b&x_L*#iv;BlM*oKRcfAUNZeCH*bSRayPRzJqQM#5uV~P*~l(wOnR~>*Q#|6r;vX^OUz=%C&$LeacN6TdHT5~$;i8Md-C~NR&S*CgDPGX ztOct~*VZuqy#9H(plgn1VUIGhx7Mmhn%YPFYu&z*W2FzDk|OKUmaf5yuks3v%s79$ z2DT~TM>O%cVS@x+cJs{a9H+hP1=C=j-S!(RR;WTJn7hGjjE%+#_6;UKa0EobYN^xiR}o1bH!XC{e7&skVH(%F3t?<<)f^mj8vN zV~WV;%k|R@>$1((p>2i{dNu5k{J%b_+H4&Ip3qS>Vb7e{PAh&8fR(Z$R>@0V6VNA^ zXLmOcpC*Pd52$??9F(4E9|bz}Dyk}@X59!&bJ<%jGpghg<)>ApPpSXWh|489mpC29 zm*8|gGS}fS+gcIVpz>|kw|;mcXEwSYV`sExgvY8>xvnBN-t&kTb2of+c&9-G_hq28P92vHgOhVUs^;4%{ zfq0tPTJ8yyhkt(zzipKBQb=#7c@JFK1PNw;2Eaya8@k>pIt{EoZ&_Nur@xoX%aOFg z7h=A7eC4?PIJM?Rh{l{B^HRB{N=pX6ReEl^X>1;JjZ@(SUKx{-%*|Js$%r+`U0zqa zzRhaXLCJo2dp|kx^GKYb+P3=LR{+hDEqaXeHv? 
zB$q&(m8U55&y7IBZF;Tpj=9Y ztVRZ_gDI(DC3}#P=X>zlQ>+bw=x}ngIm>i7yX^C!h;`n@$@rB%v7Gry%OJ-VNo2Uw z+)YUw}oU_xz^)TGt-_Ab6(VJP~ATV|td9eOkQBkpMYs`ba=H z#4G#UYJJaq3B36%N_w>N)nV$OvEI?&KVm5VKCRJ8=kbFe)fcTSe7HBq)lD1JS5gb} z$S$CJ>tLr4X>L+hKgVomFsL*)+cSrOcMK;$tp^vva;QDL>%koS*18R}aWJbab=1KB60AmA$y}MZtB7G6p zm9L;Y?W;D~Q?gmo>XI^%T$Ed6;+g%$z=Fc#!XA^1(`mR=zz#&)g*Qd=#uhqFr07!qtz1y3xR`q2Gc{jBU?+L z{hEhLE#R{)j|Yyubd#vA+}j=-++`GIm%mqpff~T?!{Y;jYd+pvV=ZLNDnXk)6H#d* z)0Z{}enL#!e#na{AHwg&H$yjkBAsWILBXJrO|MQf^dR9xSYUj0^4bd7+83)tP848k zXN$g#RL6#yx}n-d-kbCG>t+lK;d041wV4(z4`Y65UK#T%^GaYZ;V)nk6dncgTeh&J zczPc2Z(UC~bFV&-T|s$F+IuwebuB0Pp3d9fb-?bYxaGh4Gj#H&9O_qLTep;dRh{xD zPLYu1p9h4qb3Tr| z+D8NSo=~>i1Y=RnMiR;po#N1Vu&K_*$8zCY0(HL4!E{INY29Y?b zC|OFa%|udNOpZZo*+ty{+)RKcT+#HOFiQ32Vin zB}Oq2)#p)7N^(AzRnVyR!e`Y zPV1@*lVoClLmFGCVnjZzfpaGJztX+(VjD_nAMFx1ASq@Jo+C+4wiV$A0u(h9updNv zhue^=s4DU{ZnL;pA-Z=}yJ%Q#j-vpiHImD{%@0dQlOYQ35RB&LC>YW;yhbE3sM zb)|8(1QktolJ3n2z1|143s%#^<#V%9wh&kS!H#FxjQHRhbN&#zxVYs3!No-LHKfKc z-MMA}7R}Oav1E$K5?JZga9YEe#X&5s=C%5N5Hy@S{`w%P>w5695@cibD|?4LPo#M! 
z?7lqHX;Qi^Xt%Djk1p~tGVlE-Spi#Mf*Y{@Q+4|WhD#y_l1_(s zlSp>EG7mlE!76k2Xmd74MctQ7x#v-pT`}FX%xKQM;A&+gmu8OHFej8X@$J*qs;k<$eYX^mKn&zj+wFQ$+d}tn_9(WJBL+l|toqSL_7}lIj^_Fp+5mUY4u2waWeO*F zgH&9U)EZS*nL$0r-Do}=q;VsPHMWMY3+y;;`%)O`BP5JmZ7fV=n*=G^-)hH%__B~I z*H7y|vQ0tiG^yfM-89KegCN`2zP-Qk$kuJ9O4j=eTBy~n)=CJIeZS4fY7~y_Y>el?+aW#XoC`IBKW!S*eZtmJZOrk=3ohfzu zM$Gd?Jt)jxQ;Bva)_elG+SJp&)tRdyL)XQ>QA}sg2Y7ero2)7KYnuDQmc z#i6P>X8*B`qakYO3(NS7iOKB8nb|utG{WKUE%3sHH)IgtN4Kb;U=E+Cm@E!?ZmF<# zT;h96KuqfOpOXjv5}l7p5l{sqZ4P=;Gum~f_yDVUw2^s7h*kxqw5c1eU{Xp#QKMS zqPSsOEwo)GxKu#M2=XvA*kT`?PsgQ-h={2F6f&yWvjIXzGA#NhJ1iMy>R@*t&;!FC z7Oo!5Sv!u{xg~iSAb;wCIZ(-f9yr8q0oW|LZTHV384#HN-vB&_45|xJu3K+nEA0+0 zTO%>vFoTAOC@TcA_*QYS&`r%i&VVE2M}M+s^m{V2A!0*)dfi8^+3!CU(6fQ;9rl%| zn6zv2{4nD=!;S62D>s3ikym((ux*;VL-F$pT%q%b?cB1dHFi7~&iOR)EK$|0iyF56zFr>(iGN#XIP_p0#WV)c=4drP?`?^Qg2OXj6sVjmqL zN_?~caUV0B-g~MB<)cE{H{Q_?&4>qRAtvqWQ|;{QH%=KVs+Xe@-wn$dqItN+3rJ|9 zEADC^x!sa}JAc_8DwL7~55s1R=bH7`4+h=Wx>va-Y2C+2Kn-(*k)9zYF=P znweHSg0>QzBeY*V_Z6M!s z>NSFx1>pr;R(X`e{RdCj>KA9O5Dv<4tdBZVNa)*8XAUN8nIi@n!$J5{ug6PM%7^}nx9I1}Ua~7Eb7K^JWNz{Y zSah*GC%>CiSQQ7?(#qlwi-6Pxc>5vwp&#m;{gH z<``7Y4h4Sv3R+No5#V@rg~=s`Vs6z4A{_H(oLh@|Sh`ehnMSC`UIR>nGlh-s(aT$H z`gt{&OAn}fWLdN?I`Kaf;&HlG@QqF`0jEY2&f~hbYz4+?W80YN{koYloP1I*$BkP8 z-e_v8%nBi(6PJ3yLQ#i8uE#_BfuxQT72$;# zWVW3zY>Fg=2>aZPGA1GdYn^}Z8N9jA^mX}&b!|z}J09I)Gk%+<4+;8uPrHmtTh6Y< z<%@7ST@e9=nVePIeQoHQwWy>u7|rTv;(B`@+DK7V=(Q76F~>_};8#&JJtR0JGRjX3 z?A7;&tt6am5A!wKfAH?yZfa~A1OE$~^&rK~U>e)ERTb}Uw>t0WV<2aNH|k@VK4A(> zHLLcdva)^PIifq09SHfTO3HOMzPQuMS!XSfZ08-E$C0VUxl~z~!ZN3Iy*3azE(iVC zq=Hf^N2iwtOm8R8Y7I4I$VK)VfS&u4(lEg_WyymdNAgo56gA^g{D&5IuTMa1i;H4s zL^i)dbYXH1kL>;7Ny&mQNB5Y_bmhq)aKTA}^D}?gD*VIQ{x8$x0Wtnx;V8dm{BKtvv|Hb)$8(o0HkL800wMC*IR)s4f70xn z4c~2ThkvpU8eiaeclOx++U85vYyFB$TXOG{dIz*d9(X-97c&gH_d z==bD8UYz@o^rQ^p^7hGKPFZ9gv%ot|p=-z)3e^`%pBKJo1MtiP$N%=xkHGBpKP-s< z=bE1Im#c=kj89psa$kLJ8IIJBY>wSE0{Ib=WX&JiD{KrDH~roLEq=UOFh&Re?WVX zV6{C<^Np%)|7*k2_Ij6i?LMMG(P92tVO4$KGVT=NQ<$zvax*tKzZM5{UEekrKra$Y zuhKEOe5X%bZ{w2DJ 
zs)}4Q`(LA*>VlbuenvOl2Y8AK3=LnR-d@8U+#@pPc663q3fNS^lX{ z<_4bTq$@mThlgzBBH?5_lxdYQ0UTb$548Hi%OhUtS2xldK7b6xtVxv`YK*`y?sbMj zq`p9s%QbmQBInD5Bk#4Y8KfFGFdDgfEW&P@@1+EB^zRre3#qO93fI5+vID=OTbZXQ%qdY7SX@`4 zqJA;{94)J}OQx59Z`;B0^x$Yxw!?1JOn5;jTcr$SV9$SeH9gWcEP@A~yUwIJ?8X?{ zcj3VFpw0WT31b*z^hxx$ZwTd+nPqX;U1LtiqkcAfxA-1x%P>LD=X)~3!1vit)I6a# zzyRMzDOI1OJ@V~hcW0=a)Hvg-68p^t``1x(&ja@?4p}zBssAIgf*)dB7Qj}G-OtnFl?sZEKzK_1Bh8!K~+k|!G|&xmN(E$_+g zH$MnI?AspiM|sEh?rwV|c|H6$y4|^8Cohz+2#I;uZP<-7H-K7DwV|~ih5f}xsxd^FC7u4 zwG3C;3Isx9>~DPA`X{+Py{2FMBZL0p{drRH6?a8rjD?@A+4wQ#tp|I~dtXEJp9R+I z>0lFOB7?zLF?!y1d3)4tC$D?rm#b2JiOwi?R5p^oF{bztUvIcpo3f^sBxR1PTdQhW zXHeW)C!O-0JKB5Aa_8@U%6&Oxn%zo-D(&0X9dNNgicY?Q;8AtA=n9XQ7=EJXCF;>y zXI*qTW@DgwS!GLy;B8l?GM96K{A)y=u&D>1_^eGdza*=qpTx#Sc#p{C#|nw+zBshz zD4*;KdK6f*>$uR{wVu5Zmo&3=ReRZh9~7<`?ZhY%pR*OwZ_k_wZOG$E1+QqGV|gtH zN2KcV6yTm8UAO}61K+7N3aFc9H0KZ=*x>N&r#y+<%2S|QE|`ExRS-FBjrtszZLu{q zFdsDTUb9S^^s3tqDz-gLz3ai2@95@p+$~=3-Dq|W{!lK#I#rj~2A6W=2fUW=PK8lc z**)rc4iV6HmuCd~$=;Tt9AY_rVppo-vBkF$w6zGe)az!5Q%omse44#z$AqFvxi_&< zSz9AZU0#&jQNFOLTH(WsapwZ-DpGxEB!V~WgQfURpTNI~q^wI!uV8&GJB<{UamiD@ zTjA@*d=vB!Xo1%vMA#Zqf!Bt1^62K;bhe8rs`Pko8D(@!OV&k2B9C$^ebfrof!ESn z*!dV%mN~p#Bw@)GQ}$o-q}&ekiOIyuf6iKwFxWEa&SkdGE3*(h8m8|+>+erTccu~^ zM9=&n7#2%$@>tvMW2moM@D7+uDeS`Uud}c1Zk>K0G(SiCqR7Xvrp(c6>4YU)w9bFT zGp;AhG$s?*(a*9hk+F^K;-Iz93$+|R8oBbz1wn92kC65k%ffl_mkZ+IExn6SI8b>o zz3taO-j7R~=f9e@XwV1R(~P!dJRe`MY2RhfnHkxT*PAf3qVC4XV>t!jF1_hy)Ixs{ zs4sZ*ena((dhSiZx2U2#tJvJ8>M~Gc=)H$DF&Ck~UMX3Bxg-b_o>N=@VqruMrTubA z0Pnqt{N-Bt2Rl_S3(+hre~y z|FrHw<1aV>kbW4GDl(j?k=!}EHI!uZ$f{n+LJrieX=nN^YL`LdaCI&QCNd&hw(MgS zj`g=0i<1%z35B=sQF7Ewb}HfoKIqg91;4M{zQupFQ%awxkG4#sLk!z-jU)|muXTm7 zTAdL3$Asm7jA;Nc`x9}Oox)hWur*~rUcM!_=iL1@R-b2~G(i)as22s*9T&sjX%}}x zF*zPT5ho#wo#wlubgbOJT7Y0QX_hzYOTpb#;`%g}PmFbX8U^C?vb>iy4p=pthTEH+ zOMd-FhW+^!jiz`p}FCjuVM_RH9#Z+fkSla0IKd9NfugF-vl#7*R!qPAp z2!JPLvU1j>sgj5`IyOTNZnR2MOS2ua_4fK=66$6C)fGlRY5rx&k5w- z+tNcBrpg=xA5@iBWV3Voc!}oc_g#sNqzv_@i{E|c 
z?{Vb!nEHEu@q4cIe>v}@*!h4pn;YwAUWA2fhILMBRNWwg8>C3=g>$-XmTSt2NlU#at6sr zR8dfj7tcT5+n)FD{r-LK_{Y6(ynCuguQh8`m{qgpSF2Xltopg|a~+^S+|;-UfIt8M zy8Hn@H$e*;%F4Hp`noqYv{e5iU=X-;5McoDK>7LTt0}RYnwhiXum4AiUtzZPzFxoX zf8j3kJ)irPI{-`z{zaVsx5tDIj=uJn5#C+?xqL2}Ulx|;lBRL~51Qu}ZTBCv+%G!N z&&%&J4)Pc6W2mouNjqH9JkGz-cE8c~UOvC#$6v;g^SJH*i`OsrtHdOZC?kW*JHh3j z1@Hy*0X0DBSN$*jm(CLd08(cFfRp^kGrKGRXo&y-hNVBAaTNdnc{l(xzxw0ZA2RW> z^|Ad&c96>}*vSb1PRjs*)C>TqCjfxR;vZp`m%k_*`(+TzWw|^rf6l;dz!6{vGyoJ} z4+vdSqJS_U0!aK^29yCD@Gtl4igW28c#vO?01porkC1?fh>(Dgkcfnwgov1on2?Z^ zl9Y^`f`XEQh=huoih}x*rudZ!=vPV{2*G7S3SvUy%cuXM``HCh5k%FXK>B zQ4`S6(s7tPo~4H>&M=7R8QY@MGp0F3l?;tc?d&C`^aBDLo0_?9JsxNMdvRZUt}x}FO907b z(#wRDzzyI_8A3lqA6-CG!h_3;0R4*|U)7?Yz!~x2lMOWe#hxL)=2TlH>?d$bj{Ezo z_MgB{z=JQ2_$u>a3+^V$;(q?yd(F1TFQM;Q!qE0@gLQFw28`ugPLVQoUD*~IJ&>VB z&2oan0?pkb z(S__)3H-wxJUG@X{UQ~!Oxbg2xB8H0mNT+gW}n@+6p#Gq1bq0>-7w(lwpC=!eOpiK zJNbggUxO_twM$A)T&2~N-sZNdV>6j@W9f5XiV8Es$rM1zwU*|PgC!%%pjR^6kA_ev zYx9p;&+^SjA0rrN2GdPyET7hpJ~ARf&g$XPs0waXgcp_{i0hmhG;+7`It57S?B(gJ zl00)S@%vC>vI(l*b>Pv0DbBaVW=&h%O5wQRAzLi*dXA`P-)Lz13HY93yXf=vns!+w z%qMf>l8k+1=)|f94Vq+WbbH-=u=pbBd^cZrSiL-+aT47~{RwPLXZ7H0SGZ%QMLU*j zQ~WTcEr~)oo=IA9$<-8EMTcNbH=8vm>mzvJcm8mD!K82E*2Fg>wl|>sH9cC8&&}(+ zD!UW1T7qK0%mWct{>JKM8LHEICjmmIbrEWB6qKbHd9-`TS$AAzuvVT9%}HQX|~7Va@jV$W$IfgFnG%!n(LHwYa04eVWZ^i!T}= zB#`0ZWPTnfYeX=km8OEWT6$Az7k0p41Wwj1rwJ_p&Iz348=x1wr>T7+QZXiV0SX4* z+#qqcxV{X#M~&uLBY|Vo$o_RU!%^;K7_H|?Sb4~mf`)T)$9cZ-EO4UTEWgt`+e)0tVQT3*_-TxH?~)IH zjmu!q`(!%J786n_FKH$&?7Ed%4d^Xi5VegeZ6 zIC2HMyD?4IvSgu4%`6>;Dc)m6y)k)REgZ2%ECPk`_Ka}h5D;996p$;a3#Vh*!SOr| zYO++Pr*p2<-%bd2uYPYLh_Swo%1YcRqv@wXV1-DOVS0l!pOaAwGu$(*__wys2oQJ6)x)v^$SooP8ifISB!3yiG zjD1sXl#Ig@wx2ryNc4opdO&b=meWtKJ^8Txy}t3t588b80{1qhcTzm6 zzC^Ek&#H^oXdbDqbXXg~t62as!HNh zx}Kn?$K59^Ohcll>5X6tF;UhJ*;*V|la{znsAzuf@ydPV7HNIu{R)A`Cs$;6okOYb!U3^M>EJn3pvRntqI8Z8F~+ z!8`5yWYj0%2k@kB&7!#e?U~usvix~KJ+*`kJ8l-!Eg!Yn-~W#d&>@d za*VobVUY9++LylG&`h=w7z!WLH^cCyJMB`Rao}?$9~xiS 
z;yxNxu49^dr}0DbT2f-eNUQ|w^~zUNt!pB;`?~ko)1rE4^h~>Kq;h;P%znq@NA%pX zSjoryD{H$xEZ$!b&s*&Zr)4_d^T!<0#amYU5K7$_QLT@}0rf7=vY9B($lOKa->_WJ z6*9f>*%-Z_hhjD@dp6}U7iYK687AVl-+o^#zr|~XB;ep(7lF^aafAKxFG~dX%oa>| zSIq+GC4C}W1xvO)@Di7nnvd(Vx7e79Frh4RMuC74i;zQ4qTwkyyJJ^>@0 zSvJ-DckUUxR8tSQJquc=k8{}$in4pFqK#26hd~mRd2BfvDTF474w;ZNI=0(q(m~4s z5z~o#naf$uS0ptnKSbfg8HEt&SHE1Rkt`qc0^#80#LBP~mJ@0}?xDK%>W=pE#p$~I z^Vv#E)U!(H%bdOpWGdph>QDrw3>QudptX)K(G$IYfRDj)uJGr%s#D>!Q%&J5?LP|tJLhM7LLsk6d;cT&37Bl=2E7$#H)^pC-B;X&v|Ege7+1VX9?65L3+eKf*in(#Ut(JjLIfIm3YfG#8{L-L`j{T<)u&wQe`nK# zdQOm`&2pQ{>$6>)BMBZ(Obb?_`cb*x+5gP&^#4z3(n(_4L{%OuW=D7@6)`MPYEsry zqhK@mH8XVO0({UxrdYcWR(t5x>&X%y4bQ};kubmVd%Rkm#bQgR67u=s^+W_pO-cuT zm4U_QjKf>7U=jSlENtwpayk=&#Szhv@a z9>NH|8?QWnwE13ny8PW>_iMXB8QuOBg(q2~ZDqtxFUrRKf3P{aFCrXwI7;VJ>W)5g zCnJ?;vGfKd7SFEMj+-!SX4QU&Xn1|O8Iq}A_|{SM)N3Mc!1Vm?=}0+9Z1!Hol=dR^ zlG8}Icl`E)N5vt*92R8v9-fV4o;kffcp|wvl_%Ar{{G{)amO(!3-Rv?zBTW@@(|+k zT^TmG#!D*EnULgMC=eD#QH5;J<6GUAkMwCBpc#vL1J#V061&Hnkro+lEi=$C|JVvn zSX#NNlZqP5pO&!XjCbbz$jXJYPXx9_Bz;)^2~=ASR7(5=3@@Iap!fUQ)so6~eT7`R z7i;!E8BQM8+GHvbBGaJCg0y?EdBc-mQ&u<7ennYkekKi*qw;XxQzYcQu;ebyZ*7uBt&_vG(5&H&kN&t~>vM={6O z9lNFaeQqdD!4;B!iT)=b|KiKe=-md-Gu@n%eVts3;-D`NoJDtY1+#xVk)Hn?oL=L| zn}LqZvA#LbF!5MXk-)gzN+%ulF~3LJ(mvLy=OZO|&py+oN<=+Fj|}xFU1POhJRA`! 
zfJ_^mlxavh=(^?zQTAU?`9hWo`WSIr`uHa>FJwQP^ALOgbs)lTbCq*o`h#?a*v9So zj~C-FdX5UXE;ZucSo}8@|Bc1}jKyZua=ONcrA0ixrHIEtIb{=L&@z)0**Vhj7&#VY@OY#iV zds?NL&PeOHnIthvrHaI5OtDefqb7tC*kNT!vcky>fDp78D|=QQqii707E86n(ab1q z^L3Haik|)|_Wd$>MRwH*33#cQK6u65A+m~jCS%*rdtx0YtQH#Wz?cGN%KKc;kE{aMF85^}iiZOJhKINs{KnjjK=Rv;Ixlsf zIzGYndOHU}VcJ=Uq>GWdY+OaKXA+W?-ha_^;qgkuYyTU&B9!WJQHeonCgGU`Kqewf z%NCe#fwL;e?8fnpKF8cq(^?J8tK+`v*!j$11TW0H^5IbyH-i{tC!K7wk)Kbhjhh4_Gs|Ve6A@_EjuJ1MMb8w2Go)F$NG z)RDg`XmOQgUz`Oy+1!WLZV(T;C+}e-MHm$@A;9y%pcUmaz>+-f(X#w)&|!}>y* zeB8Whvy@AK72TZWK+Ao!rD}(lZ;P)s*XugnY@sK#1fN{Rf`WD6;CJ&@P#0T}#4!So zhaeL7BKQ5dMBApo*UG0Gyz+}ox&B4hU};6i$jvM|mqnEbr4(8nhfkioL|27De&W87 z+yc9-xqZHY$CVFS-TIeaZ05_lI;#%}ndqP-h+AkUZ5B3@pQaLF^{6Pq0NHyG@YCF( z2Lp+E-PEL2?q^#(7itDICXRMvGW<6a3yYbd{9f5P;Wdm7!w%$_iW87Df%Ou2AZL7` zeYJz!ab$%|KSg!RbdN8Uck^kstPh0lctswnwAhXXS9oX}5!rhiOvw8jR(lTLU}%5d z!&Dscc8=08CeD6=x>u`MraQ(&bk~U>y|vr`Uy3?RhDPl{$mIu{3UM~hRb36ca)Bhu zK!k2TPp6GizS3+v6}ULaRu+5=v-(In$H}sgSjyz9a~f4>3EU z(;EJal$5+4HrOVeiYvBs6`Wa0byyAx@tbfV=F?F{%elARp=}jpC@4ywS%NKO5y3${xa?NBWugiU7rYBePQlAjXZvp#Ue)WiCi|u zo31f^;DU?-9Q&?q8dIv)N6#e{LbM-6BP$WQQZGtk$GM6aYS*ZgP|xd$n3b5~O1UiF zs6rP2qGZDqo#UxDo!-_c6NHX2MiT}Ia)W;e)APpS@kM`V)Td3mTu;)I?`3@;v#V)O zC6U>Cku8h$c%r1WtijE8yCnXKMLbv!?VH!o0-1-yNfxnTg(EMKL_k_Ji=uD%|#6c_t`#ItR3oS6T6yOBG*Ufdfp`2rfWyx<#M9T7|_XUVC zn+fD@xSRK75cZtn23OIn4Pzo;b+vVf)8(#9 zdFa}jEN$mNPItjbkl|g;+|K8U<;}x)g_YlpgW0gQ6R?O;R`m*O5r<-G?C;DI((5qh za)Aqz$PciCQ5t+UnRVPobQK_xU!dux<0lmUW2PC@@*8A z`&x{J8n6%14CG1@ia=vwDP&;3IZB@BD^IP}ju)D1QH^#OE7e-TuiiKksAz{7)a(3Q zYAtotOjpSSNMu4CfKY&hLU0fNX+;ltp;kMzbLb>}&&p^gqj`_sT@3e(QIfN~)~Y)<#>B*ufL!p^v4q*1$eb92Tu2Vx%~M4*H;Vw z_30Q|#>TbL_WWlQ=%%UI+vDC`oO#LZBn{pP4!$yy!`!BaE%V#Ak>`flMbYLy9w&s0 z^|wAT=8Qp=IO)8c0#ahrMN9d`S5}O0$hw46RB1H5GU1`E{$Mpkgg^N3z|-ivhspSzEJ`U%iCH>@rBj+feUQw+`XOH68nvJSAXiCK&gJjh97 zEllT0I;BxjfD?hW5%Kknd)cF7!`ITPO%h5BsKgSjI$b1VD8Ebeb1GqN$@j`@?-z)N z&JfpBJTv+D`tHXJsV)DcY^9HhWB%($Z+^HoEY?WA{&x8~a7V;HYuwAz>PB#+RU3O? 
z(!jEbik&Vkk14)O!eo{C^w_p}qVI>$IJ&+P&&omHx@uX&j40)DW$6f-@TB7W;t_u{ z!myj|m7dK7+=Wn_kM_Uj`RbnQE19ftwDMKUr^~7^NFd9jph-^wjxQ*!fJj5kjX^z% z3xMY`FVW%YeD~{;JkNF7tR>R5)Tg8a-WRNKseN2#NFb^Rzmbo5c^Wc@j}&@ddUk6% z_;AW{D`8H-m3T^4eEv~G(M9Pir_R?8-`{f~tl|?rpU(HcPRJ9f(`MEjt3_}kjrSh2 zW~%&gCG(i1LgisHhT{~_OD~g~b3*Ce&-3Z@;LA)Y_Bq~C>{L(MZx&&qH6VB>uli^Q zRKPV;Zt`F9beBIalRI#t(j&?9Y4D)Qq+IbK-BRtWjoNP$Sz^?`T!7?IQQiiAWj4aE zbLZ*k3g$v)Cuqq?gj0QIx&Kj63fuH;J2AdeiXV65@P~0sS>f$pa%LZDf2z^&O_Vgf zD88nCG;d8dp0t}IpZy{DC(w6%Iq)wZS=PBTGs=Zwl-Y4w;uYp>-Dd8OyJJf}?PK34 zT0;3lDLQHpbkjKZgIQLa>Z$+m`$nwY&6JUr1ibgq8>I`qE=F1$-_GdB(Rd+g`h`~i zHPfe`nsYx?WNN60`VWsa>f5M!4@xG`op!U8ZV$LPA~?TAP?9HuFEc9=sFL1oxb73T zeVD%GYS^2=nc>Mqx~cL|yDoYHo2h(pAi%~$YV$W>5|TYt`Le0rx3f4mY!Pako<&K_ zWPD`7!6Yxj*&jr%x}H~&&Km^p(i|I)Mu|Sl?^^pia1n!KrxP9E&d;n>e(`*ES2NeU zePE2^u9{Q(y8yq3Vv9%fId6Yl^KkxKaJkHj@$W#!9jBcBmciYc{pV-J2zN+zWDsC5i*y~__rYQci=L?c)n8xa^+@Ec;1surLQ{LP`jC@!&!#E zhRttH%!!vdCMCMkMP&Xs7dHhG(d1sct5=0KGfdgc(=IhK;43(hiit8P_ddQd_G{Uc zU-|!wa3>wxakL2h#(2D35?gJ?=wO+w=htAAVns`YK@``+pcOpI=u+N6yG)2I-V5X4 z9F!@NU|Hg6P(zZU>SJ)8KfRu+5po!djm6sYV55+G(uuwd{UYHiDmQEYRtGKmH#+D4 zL$r;`&5-}U)bOVKztcG*sobK5MI#NaS%~tl5)}$Wv`9TNMM{>RwXLego`lO|Leo%i zwv@%^=EHvrdBbzH)Q`-q`k4fBeQJ*8%`9-DNh=?cpIw-1lwIA4#lmnTnvm$)I3;wiVY| z&7S9RJqO7@w)C?T95e?7n2juQC)Ynx1@x6;_`P0yAWQY^2$+80Ngt3Ma6H{$1t(X) zTC;w%W;2ZX#N@C4?k(osm*N|(bM0%x@2`81%k9*Nw7k~jrUbFu=P71L57_V-4(kqK z9L4#ZsR>vLU(E8LwG;eyN6Z&fqg^0Yr zL4G23P_pq_b>mF^p?8&ie2-_#5RZLGLZ(+k<~CdDMX~09U{ri{Z2sAB~kvn`xOXR~FJdHYElFklBnqo4{Fqp4JLuD4O$7^twdlzG*TTZP9P)-16 ze=sRCx<>Zp8aY<^YXZWJmNu)dcS=-;b8kjQ=E+%pEjoX)EBkmjRw^%-QZmLbXR_gsoATHl%6nj8e~NMq?U%A;fb~r znWCpSYD{AxAvHBETo~4QzePGmguD?;Xtc@hXfVs^$@4CpwA@98AQL$M)51Owyq`cS zkME@Zf-Ldgy_==q<3!fVQFMJ?exXPn8ZCW-X!k}fh;T>%Sxzf|?3G=&js?lw9lTd8 zwR&)^oolS^r5bjT>!izu93!pbqzD;;38n{s*4#6t*xdA7E3=HJj-jYUeQKW?^Xdyn z8fVze@`9`sp%SA?dT4ocHIlVtL0e3E5XQqm==q08{}7U!0n+sLM(hc3?;MHhN?c5> zS&to*bdFC`AjGX*L&y${$P=FTCh%owCo5ng1=am&rCp8{keOkh13EOVPxitl+fVZN 
zL{3r_>nMf83hIYphEpPblncI~<~KH^7#!>7-Fv-(mIAYVq<0EL&ObP;4GHYGA4`#< z+4IDkpDHm7<(3GecR;+2XDTkFFaEU9a`#sWy-h#i&c_nW?cPJ7$xg&iD|>Sz7KjL* z#D=0_6>dHRWsTpn$?^)n#*9Bs!i;EMT|FRS3iOqT6SGO7z2BB$CB)92LaAkF*~R=q zT;7i-F@y$`&p_~XN)swN*g_>K&;fh@s(6Tvgt&T%P@5>eq+5MuX6mozolymY& zn=CEU%i-<;!*80lr;_86b)#3i?}o65Q&3F6A(%@eYdzqXX*38#6z(MV)o!D8h$8c@ z$@Q@ftFi0R97Zb}Vh9;e!_ zYPEw6lM6THWcaFESah=D3EoX|AW|UM+;t;dMet#*KUn*ZeoyC*n$7j?k=V-m%hz)V zh3Acn?y1tPuiC+|3E?2WDa&3+x~QEE4mwge!+L@!!FJr>^P#}<K?FSS?d=(9PQQBB$(Q^zfY z=0g^h2&XOFdU+GGhV`ge3b-Zm%M&PGEu`NIKJY5DQZbwgkv^xufbEZ$kQ6h@<~Y-T z+GMP)cQkBeEysvOf}zM@3iMas7N?ydnndKLQY%hzR$PO(n%?{@*|ZC5Q(fM;DRwE> z6*aO(I(_#&Y$PK*Yy_H$BCo@FTJIpLPOIz!5iR9mMoydK*#BvZ&D7+JnA3M>F3j^` zEz6%Fw^RY^B6CLW1Q7_Tp}H8AwFE+(?9_bi3)J{}3|^e_g#XS7K9E(oY?fQI)2J(h02N>tB=M+4 z4^$xhr@Ztobo4UauCVgXe~Nx|mHd{l%8;6@;Q@9~g*7uAs^rU+9CBg^7yzV!t{NwL ztxii1!X27+^{e-b0tdYqWV}>Uyi#KexMVakOKAyWF-jpP5~~121%N02F--VPWiZo> z71PWgt|+Qn+;$YaAFmwmcj{9LEunBJO1|R8bGKL20T#I#@@4BFGOsxuME6I7)JRdU zBHM&SU#Gl)UCmT~snIDTN}8D#Sx?509LbT>qW(I21Ae)MgH^eqzNCrq#>XL#pFn`~ z!!cDP9qIp)X+*1~CtrG?KY=^UKY^kFqZp&ril4w!(QBVlUVM@B2l19$+t_|E+tOQ0oc1d?WWA>cS7-h8Psw~TyIKFp^2)x@G_7XNlqed%dgk~C2v*I~3bG>QO@U{48{Ol!j zR!4Ln6BwvvhvA?Nzx}uphPzsZ>gwc$Bi(H@>mVK%GpyowB!)C2%^ITr zj`Tu%d?Ce!gZ3be3;5+)78*C0)BUwpy*=>~Ap((4NbHmg#~0-(#UyyO;j5uK9v{ShV|U=fL01i&T53bI z*wCcnl@e(D4gGSzL_;)WoST9-y#1I}7GKxHS93)#O)y#`uKO+8o~!8gJxM~ow!PQ# zMm-T!8rDIn?l3|qz6>4+l}S;2xx=NRfJVj%jW4v;9DLF&Y<}8G3Yk@@S|H$QT*P8` zOnw3`lE`s--f7*a@-Q1h8r{arohE2jrKjfsr8-WM3F9Jk?b9v#2|1~9%xn|pRtUd@O6n={seO{HnP+*YZ2AN&SW z40u=}I^cS{jqAxJ!j#uDyFGw_mB)hEwZVpsV~N8ouXvYiA|aczTs{ysj`?$ZB_96+hskwkl%Y-xFCsFi?l%ct8^ zk0T;(T)RQn;ce=$#=2h3rRtMekOhQof5|PAc*m>k>W4 zOir^Rq|+n{SJ+dXd4q+*MBwU>6)s+gyzQYIl9ssj3R09VVHlledAD)qJ)#fKMb>Y% z(ztVFzq}&P*B4tK<_m<`r6BarQEc(uCgFqt7F|bHzX)%FBIDm!{R_RnK+lFbi0I)?1SyZRQHrW)7Ir?I|r*ET&XC z)Z3DUhwZUK$5>%J)T87GNN|eBPZkaFekZDBS1Xn6_vz zmHG*!NjQZ>H=Am%&UN~BTp7y<3k{|oFj9Q*4M%m3F9r?7cnD>?!6;VPtper^r$TnV zUQc^q6s7;xdiS224%6F4b7fhCwXwco2(I!D;*DNOm#!BU8dp;Uh-fo?t 
zxNhcIRtjgDA&RgB_N0W_3&967RTNFuIx|9SC?Bim#IuMdohyj2@gI^|-9E8UIO*l< zPVcLPoGu9jckK_QTo;c`ri7aoV8RM&nBq>A;L@{lise5`OUu?P9j(cIKOGoKt&oC#4 zI8Tp_8pcpEF2I?WHFTPXfuLfj_DeLgBE^-85|E{-)RfN5@P*8e`;|D&&#YAWM#Aig zwzVsv>~>WZaGZIZn1IaHN9Awi(@mFJ@x^ZLjJR{HO5QizgKKIrDqj=+w(Irsy_SBg z_P8EuRwb_I8X+`Pw;%?6#AAY(3c=WTMSNUa8T$O(@gs$K%XHS>)9s~JQwIutm2URz zgwf2aO~TUOZ43ilkV{O7*)-B}4j1J)6@9Dx?wrCO7};2QN|-7iNr7=#M=wHDD^`+#fSu$z#SJ)q%~S8a+*WfB03AJL-3Z5 z=CBHex;ys5vn{*==h6Zh%vWhSAZt>wJZVF{qx#s@IuaITm`|P$-Z8#ylVq>>1iC=F zd2@R?S=)9YbN5vzr87P$_(aIRt$0n8{u&F+>Q$il#JCvJSgN3KQ2FigmB}hGN$%0y zH23@5+f{Z|Wc>>^1(E^lJJNR9Lz2>6TA^wNkNS6k=`CD9ZI&koyASs@Z@nk4mPRff zkyLA06kD`h^lavEM(g{7c)Zxi2nR0?VAGyvDLS)&35|h*rZkAcvT*ItSP#;5y%`|5 zo!qJLa;wn`!vfmhhYXi%eOG^E3BEWPeS1-k1;6}XV-H3x^@7P#UsG#^;dC+W!ImfE z^kC$nK*4)Xm3j=Je|@&Od@n|@?Dl@NwX7kHqBvUpZ8k<#f_2(Um`3GgAJ2Z7hbhg!yrGlU zsO`J!Dck3VPCtS26@Rx`Lf!yf^ZRAjGAN{mcRFXRdm5rK6ev!8SuaB3c=#Uu$ubT6 z!8K$*>jPonxI(g2EADHVu0RTt=1CMqSgzaadT1Qh70D!uoI4Lovhq?sij((Qg5w(q35rYtRUjx#1i81 z-;Z1{;rHmzDxu6JD6p}VlNJ8s=tljqwNi2Ky*^%*$dDDm}=xU)0km9Ud9#Jqpj|AR4MiZWozdo;zaU_izv$2WJVfQ+o|q8vCjtawu% zMUKQt40}B)9Y8eJ?o?y8xLa4+urS;|I@%Btbj)~_FP4ig9+O8PuxP-OL7d1@7;Ph3 z24XGnkKm7cYq7_^8pM3VT#|LwPuSP7!)5Vmsz!Hsg?aT@{cyoyX39-FD?^hx4uls+ z2no9lC9|f;%Hma4(SZQ&2H!P$&SgySH2$;)GK}yMEJu5w^V&~Gv>`ccv(EV>6x-cFe1pco;cEZgWW)qVhQ6$ zkfnS#)WJeBVZ+Bd&4i}ZR%zLm`~kl!r+b^t=$JKBWstD+stDpRujWN!R%~+@M>Gp{ zB!>N#7*>-?-C?t-Dp~yUE92B}fbummPY1?(Z0|^Ji|;!izV`GhI?3 z-xNi9ZB}wcA9xQTmZnu(i{!_{0z4EvR-bgiD(%H z9#P8J;T!0;#eu0lw_I>m`KUVjb2;b3+pT#%Nf%8d#oaZi){icK= z)-}cJ?V9)K5q5vP&{=#^;=Il(Nv~CHoDv>1E1rS_h<<=8Yz&_kU z3|lF;IxVcyB-mlM-paXiB5kCnLHt1aQ5L-mbkZ@M522pP*aAM{7{wF>BMc}?R=h;q zp58(GG~e=!G%ZZYvv?U`-lBO?mNNPNpn9kG-r)K1>{VLv6s^~fk^Nv$+7*s2oDU>$ z8EkrQi?P+<9c992-q(@|-AfrLndjhDS>iNKY;vl@?kPGVWIpUVpkPC-tTVq`w`vRI z1CxFNA6YLqnRPS%I&Opt$^6BL^!A!$#L!)BOZ}_}Vvlh7&sD_<)jN-_iwDI}v%@lT z`}nH^p8t?>;#&qH*}Zg_qWeV1EXF>i{{)D6Vcjmx-)6_Wx(CJY2hrG@NjN!&D`$b- zKV~OJ2b8DiW0irhxLl$vb#t5wUc;!XO`^dUZnDp7Eb9c?^5t@)T$#pYr0K>CJfIy8 
zRw%meij`30>e2)9nu1~vNwOEIsxQ0*hYonvvg~$}eI;26*s+8Cp3x?;ZCXVh2}F1j z?^op3=}3ElY3)9WYhO&*b<4c@FHbc&hwCLA^3wd^;9n?Bj(2&9eMPXQqanf9@8fD$R+!DwAor#+fUl!h|5)^w%AZ{ZgAbr$&tQ|_GZGy zyc7iugb`)p=)m&O_?r3^9Y5APihTr=;tJko7+dgGqOARU8(>doz3NBD`^1*Y#;1&^ zGTi%&Zfy5<8!Ee?B}t9UUdvgZmn0S+$Dvur)t5eRU%QI(7&NW#Q)l@GzkNJalOg;_ zb$rn-9na431P213#vBY8jqRhr4Ckw(m;DH7msX;zuvI|i$w8MyI zp=|q8jE+I%)RBnzaiZEz=j-bkTwkQa=N0=LsFvJC4d3x7Kb9iO(kYC`?xbCR$(Oqw zq7*}770%}bdJXsI=6`au{E5AJbg1XGi3+Apl4fQm_{FGxTp(~P@y(wKm0cck*N_e=;NhV_Sa$RSI&K-x_bPoiY1VnPaPO9O@?B==nPG{pL!g z@$dcjZ?p7`|7Y}sf0x&$*>B^sv{9WBQkFfZ0kN~uXZJF zk|zE(IT?^|R(srRJSgtob2l}~_DbBNa)*lnnqg@}%{sVlB;wv_yq86fZ3S!Sj`B4#)C^Hb{uVRhoX?HBs1GX7h zl&>oAX`3cGjXmtxb)mThjrmNeN)-xJ5I3$Ae>io7QLy|c2xha2B>eiV7nR{o2Jixf>}e(8_saJu%U62aXx;ssVkXJ}V| zw(5(ssI{Ujv^SMfL;-rM!qRp(s5nOn$jJ7^KE3S2Xp*16Qmwe#EYA{-!ongBbY#)^ z=8aYkPubAML&uV9^mVA@MRJzjeO8*ym9!5F4C^)~jb)^82wwB5{f_R+8sFEElvBQ6 zgHNm9{QgPlH++LC-wLS>mj4>G{^qZkQ!xBKwpr8J#s>Yz*z-4kndybWC&}37r`z{q zLz>(arrAtIKG|2EMWg^ryZ4jX4_p~~)xZ0Fl~-;BzbCn6C=2Ok4LvpFe|o>-Yfj&- zJ3b1kI(4UIC!VyyJA)UrAcwvl6~#J&LDN$2qnD~ic*jUS4Nv5*p0WoI;AWq@E4;sC zPmtu2ptkv5i0@HcMtDSjK=FP5E-=;TQg^ck;pKw(6)&Y^q8gj%yX!X?W-R`Skzhh! 
z>o9B493{is@nwo-ePY-(RiNl9TiDbx3lcEqg?>>gb_t@gf{skb5b00LakkNhwEOX!=Jg_kL)gt{puA~7^KfBsUvfoUZC2_Ct-I60} z@?JLXzib{lEkijiYW9TiIM*u^SiV~JS#WVCt8$hqX=4GXBGH$n=Y32(R30PFq)BQA zp~$}-$=fh}b6@z1zo?=(csP^fD##Derf2Pj&3x@pQQIni{Z|rG28F*HtDwI|=)Nf{ zbVVlkiif~hIL^&C>fD8oBG<#Uh${S$-=jd}malo{hCdESec}>Tl7yND98XQwhha%~ zF%$Q*)hL!=yvDpC7o9&&mJ?@W-D}A>c>L!q4>OZT* z(GY2nxG~ekbe=R~_Q7yl+oxt`v#YWg<^WW4qv=Tjq5BJF7}XULS;C` zxuNV|frua7$I$Fe0BZ=En@zqs4~8(I3Ujcgwd^!Rr0`w}WjF^Q;W2Id0RA++ZYN~( z1o0EF?r)iCr_>EcFK$uLZ^I^qrdp}q0}|1ZAs%9^eDr_S7iSIF$KN6M?YP`)ChMQx z`;CNbXIZr%XCCm7^rq)kR6ZI)1xr-QrkaLbm*S8Eg{SD{dp&q$lq*Vd+3ds>&> z60L9n?=BS*TT0}!(1HZZf0_oueqv;U4Oz8l;btUxM2%M6;j)_v49>5^bVDIYPtrmB7shqv#qSpgLZbQV5co)O zfX!5`5!{edG`>Fe^2kYjGysSJz!6YB(=5_I{fQ{>_KAFRv9lf@ zmyY&|rhi^?1&@(FVA@L^LaBt_IX=@u3cIl5=vDtADE~_NPq%D>cHW|=g`{FdjTuh6 zh+j+*n9hB+l2ahp{%FU_(STa*-14N>eqNo^I9=bcXc2I9do;a|6^CWc*nyJp>Yz9W zi#!CutwM*y@yQ{w1)oqZf!Uedk=B37W~9XUxB46!-#dBU^?u|vZ>gm>jbjF)O6ISm zJR+2C{#%j%5KoqOuij%TzQ#ZDDl>0*?SGD+XnmnAx)og{@ITml>#(-gZC^OSy##j& z7DA!8mSVxB1cFQP;u5q_!3ho_IJ7`R{Xzvc{Y<{a-B<9+AMZ;bI99Y)tIcFk2Bhe2v){@bVLX|DCl1vyh%*-7Yns1i$V?Vh zM#tHzYns1i$Ul7VzrrbAXuEOV8{egqFT&JM-zftwcX+GzzIjbKkEl`x7y_b+%fG z1DV!LgppYdm*l;e=T3sl^p}g{q?*HK_7)QQh&($px-xSS+Bgn;BUqg8Z=I@krToDq zSTM z5iJ&NAba4b;3JpuI6eFe*O-{8RfL^v9(=mRZ%vg6g61#f(}8Q`QsY241$W=vH`iI| z9hlf!i%LAT(4YCVJCR^8nWL#7g;%S!=NM1|^-sr7QbmzsVlQxmZ3-wal%e-TmeRXL zT**%;jvf#tc~+sJGdbhX3hIr{=vX3Xxrc@gPD+tIMW{Zj%Du_75Q_&?s~&;ocj3W@|+X*K*zIB9hT;bIuYLEwC|I?~+5&Q3?8>w%9>_X|0KSWtUZKI#L{ zvr5$kuyS^)M>Pt-03!>n#OMDo4^S)qp3Xb=j#WC`2k1L^$5u-G3{hG$O4L0nml51S zpkq~(B~YoxA4$YZh>4&S{xUXw>!%VzF2ncGF_Yg0ub9L;mnWUr*^a$8lnJ>x)aulQp3?kY~j2Z9xm! 
zS`|w$;y$PoA5B=L6E2EAh^o9|eStB3q@|eWj5OJiMT1k!J5^;UMpaJCXi$7X|LCet z$0A)(m-a@#Xy~I7;95a+m&3;uc)RV{#EWDh=8PR}f+mNBBDmXFy9-2!g%;;oq+y&M z>9W3`2T69E53P2mp=DTqGG_)bQ?$CPSz0K-Fdf+`%}=S$(7q4oj!OKyT5&YSxJn{Ba!Um}xBq zj-_*1g3DCvt;CTA3cB1=4H3j&*>jy!9x z_Y;!~rZPuzHgAw!p2o!TPn~N4r2w~>OSADPY76UBML%Yh2#2_4j+myGyoh40#XTxUd}Y|a zYBAq*_qn_*7VPC>+P7`w(AleaA3s zP5Mn16uFJctkYUiS8ZIE_Zxdyy9!-Vccv9Fs*Kd20l<|r?^iQLM-(ePy*Z&WHl&3N zmUJXm8m*X6v(&92^dbv!pyt38$vlW{@|=Kga|iULV(7v9F9KhO8{WH)PfSg)8DYa$%y4fp?Sky%e%$_wBonQbO=uU$Sb{0uQ2>6fr(<_$f ztSe_l@eknbluolM7edhialDQU zKM{knAr>hkr70e(0DW@8mZnsOxUImbhTA^2FPtqrtz9>*Fx+(FeuGs;xkGkNky3gkABy@a!4-Q}RrGS|voUD*7)ue9sA*$VPRd8kAssI|h zPGe55xJt09$p5}}WpJQruFe)+@4;t0K;oR0U}rhNPxli*OJF z{;U#(6cAda(Cu&_R>@x>j2>Cvr9m9%tDuws zK&b)IB2~SwT@B=g$zN~O%zp2$P7|#8{Q9%M(E$~OlsEi)wr*oUfAG+BVVQH3M%97N zPobAIQxzk3?JQ>X-ra&Qtn64phGdKtuQ>#}gN}7T15N2xYw1$+T>W2fSTx~@JkXl)14%&MwZR6Y-6lYE9 zl6nq9u6%0}^`wX^~E8Hd^*7n1PeoWE_+RfQGS7IDCFqKjk!M2o7=0xgz4RDeQO4OqU? zJwS_!oVTfKCu|r1=k5%W7-kxvD53$BA45$DZJAy7`W7cI&D<@XNO|~}Y7FvjMAd*l zsO~E3JrE~xnAI2}Pn5|9z{J7I>vAOAOJy=lnlgrIQL!GHYf?99N@{RfNx`!Ls|+szKToMxh2SxH zh7ya5kp+SRQrFW{#z3{ELg1I&O>32$!h*ptgF|C8Yc{k1)eJBc5~k zkJb^|wlY@YR)oSN1BhHb{9a!B?zKy1XWNOUb_iDCXxCV*SBvME-%eHcA5?HNfH-PB zoeT(I*=Jq#Je#u;Cs{)%T$4`zn$Ti5)eI!P7j;{K-Z~rbiNzzA3+-o}-WT@?WvDj# zHS!Fzs5FX-!Vzh!ilQ-;LZ!k99rOl^>b2T)XL!h@P& zI|W>Gk856|Gf~f|@$dQB^Q+HadpkC%)Q%cBcv(wRz^c1kAn!s$AP5JT1gC@$Ct&m4 zP~V$hT)Xg}+;JR)iR9%RYFXH|_`1==FgS^3FCeMn5GAIKk*3E<;ZVX;F-owH?a3Wb z*Y!KamT>=-Yl;pyf^F}mQ$s7VxlWqi^04A5`mq$D{mbgcR`qel;awJ+b27X~?Qg^q0%;ax5Ibp_czKl@sycv~ zA{<_7ahQZK3II4{?N}&u+CCmZR`NBK2-p?e1od}BZy-*ty;cSWG(D74|?tnN$t3+@E- zq&=ZbvUDrMde>7Jm2UW3gD@*h#|0iti>n~Oy_Y>kk#J-t=mvvQ@xqN z89I0+=QSYX7sgVN4{cweMx*Zk`u;CW!}Y$_eFsrE*{|W(pbwoBgV`hOPO52^c+V>f zC(ok0`vlTLobH_izybNXMS3e=h52(a4=6(i^ zuy)!(e@l9{x7r5p*qwfhdt4nCwWLevSpdetxp^9`C2sW#CHy5+Mg9XBWQ62cmtTc} zDh{J6g%_#>;tE#y7RsNWG73}wM_7(T{evfKH%}cd6_}LPNhVTKG>t)ePlcZQD;BAw zHxTFh2C$S;;<)P1>j3aUK+>r%h+sS$@F#$TxFHKZte-tnbsGs~NEkuVkwJ|9UN7ZIhP 
zGUtLSgKTg%%rakC=~1Z_IEz}p4=x!F;NYOf&P$E{FHDwzRn%V?J2k<8M$Z=;FPw*| zD+0=&-X06^G_-4~L3cH_y&plxcKZ{2iNEhsj>8Ran1bshrBMoX&WMZ`VEm^C7yp>s z#V@U2cZDPPABqeikjm`s)sn^?5U22XUJ#URGBQDZn zDHggs_65*LP^{*iW{s-d{YMjX3n*D4%b#AIlG*gjZGmcX>)W&y_Hd4=5a$7ql@qmJaxSuP-%b)50lV80*9Aso@Lx5{fabLYbyKlK0%CVMQY^AXm(37 zMd!Hb&K{9LE($g!O9Rs`jmANRB09?CIRAQS{u_4Ye!oWQN&BIff2uvt*SGy%PLt8> z8W9ffavYCn9JAsnKZ*T~0RWe|yIY!jb+>cN%Zv9NX#HebE4)0&WI)+AxB`tR)kK1n zV-cKV9Ck5-mxu%GMiBdtop<*6za{3sXA3ahqVlWEk(=(?+rzEf9eRx48%^LQRI}_~ zR9A}qrc}+O)?Ua%!a|3>_W~)Vnd*=0uRm<6k2h~X`5Ru?)0995{;7=^sH`PCdh3hc zJ?^0;$7a0U4Awj$L|$v2Hb~hoYPe7XX5CCh(Om?#2gEpqUCifJ^d7-+41YV>G%Np z_=||iMsf1fg(@wa%4_Fy*RF3|10kupcCL5r^5#wdy4~ND`)?_83D0RlemweMukA7< z?Q|P-Rb8}`Yx&6!L7f|b@7NRWhkBc2b^B%*Dr^lHnv1R5Q{|u{jyZOOkf`7^c5`hw zNSaZ(N?Px!?Kx>m%}kOlG@N{R%;kehO|XSLwBDSVS0ur05)6(o{nL}yvgKK9j=FaV zKY|+@yrLavL{4>YAW>;uv@H%!7dsl`>SfrP!kd-o?|i<@I;pYc(gd#68+|oeh75aD zH|i>?=HD0j5kkk**AsO-~yn2Gv>kdb5&{GOj-I_EMYfPcG602@G*ylyEDA9f(U$fe8YNWEvveYp;{4OQzP)(H4^Xw7%tx5noaXn1yXjDfwf0-AV zN2b5Su8aHJMSl;jlJA3;p)kCjXWkGon916-CtX_2%OEn0^w_v{^JbhXjVMvmw)BVC zrYRD`QZVp~(mXV9G)IT+Z53G1Id*xtP8gJWV|7CLc7hq7MkB0}hT0G9B%`>Irwl}a zaR9cItT(^H>wJfyD85gY&CQ-kGG2EEO;`M>gFkMD$Sx3Wlg}x=68Egs1UH2Glc^_> z8F2MU1LFsT7XJEv?cN2U9ECZ?9I3vIk#+yM$R}ZF*96muZ$q=5e7|NCp-x{PX zO3@T}QwK)5YPruxE##HuOoNNy9Q%bFPhEzorZ9R`EfiT!xCZm3K*Pg+3(IrDV z7JkiHNj;^<{F6fh$gJ|{84!O!*yNW2rjxZ7jvf^lh_B@B<~Iv@swio-0(O3ji+yUGpC~C;FJ~gZN#~k)@LFSfx)08zopfHX3o#*u_06L|(pbd; zUbubUH!TB9F$YB-4WI-k)Wu@8${&x+L!k0)V=ZZ*aFw&wqyx1K+-YnBo6o=vEC)GQ z${&%22%AVRghTC;Vow_rIlm_G;U z_M{B!6nH#>ugZn+ukEm3p(Zq%Ux!`Z&frZ7Z?pIyqb!|;PD(XWP6)0|)j2TysSiQ4 z61TTpV*0XT?gz~NVY12Ayr-gx;{;Xiuk6;vuIgEG^kzEx;$wh|-1!~wB0y1a`u$Ow z(KDgaJ6r1j+0ca|YrWsh2xYh8pYv|{m5yC8-juR9auz|9X6W+b!(PzHh!f%iOiQ2< zT>uIIYlKyqR$hxLg?!NEX2jn4?C*ex{!imhwP!Jl>!(-RBzJQ=F>;GhipMenkK&4n z!3r15QNo-mi-6gRPUr%wj77R~gdL2q?btY)h7+QlG}WtW9ha;E)=i?8&y*$f_dT;0Z&enK zWb1fiYpFvMwNeVh#?I-GJG+BvHZ@t9v*;H!t*Az)LEPFce^^9QY^fqOL!SaAfM|)8 
zFCHl*ku~O3UCr)0CgG3&P+cNXlm3hQXoZ<1yKrct_ylWe3Zo@5BIRv>$1Pae3g zCp6?ON;KZPQ)MS}V*x( zu&wKOWVWM+GeVQ?J>(-@kBUm5+NFc|i-^f3nK{-`#>Wtb9(x=+Y;kw3s^q|4y~E~_ zbhDF3yzP%q(66tfGF+dVkQF8og*y@)I|m$P+tnhAZGeC~9nPF4f=9_UWYLi-Zhc(E z)M458z11`GpqcpJRHYLazh}pHx^xEQ8YDO232biFpLVt#3Iv!_rm4FVKzUP>HAjjG z=JX-u@U_~o+GiZ_v2q?2bh~R^^KQGc9dBlSj9gH^fZHaIKZs<;M33dMZVKNQyqMqu z5@Kxg)PP)<0RRy%iSmd3FX=k|6#YWmC)-yTGNLZQ4$vfhuybxasvZOVF2Y8&JwG|Z zE9F{{Jwv|F8vT`Sj?8Xkt(5M4Yy%gUW6kc<=PcC^@GG6uR}2KgXbh-|eOV}BQY@vy zKwKq?)f_n1sV8n{qP#|B1a=dI2S1;mr8Zsc$gw#VTNF1JT0rSnVT*x*SF^)$=AVe?Z*!LFN}FCBhEmlcRLk2omUjbvfPxBpp}E#$XoJGB z;_0p1hPHPb_jqfGh$7mWymgv3K9QjqfcPC&U?!e&qMt6Qa@jUO_V`GpO4X>uY-PG{ z$TfnEX_tFJMZppS?*26*ZA&0FP@h9UXZkwyVi<2M+;f6BUH7~ACt|-FB`s`_I-lIw zr4|RGhCtstjEwiS<*epn+>Q!}7c39%%*NPi;NWq%;sbyc9u_y_SOmjwj#@J2pSulC zNpxke5lM8dh1Rt`yS-f`HnX3sOM})K((k14D^lgSQSLaUKNqx6G9s#`RLkk(_}VNm zv0H#yN{pk@cX92okCh(nnW1PP7l|^08H&ILIZ&D2z)*?W%o3xqA`JGGllXrS>)}wn z``WYeJ79e+_xmZ2*@EeHu-Dk%-|Q!i)2BFtbZoQ=)lAAG*SY@6VgKt;jc;c0 z^%tI8b`553;MYkHtrB?hj{!krRep3Gt)tg9X!08R3mqvDHO8r~`3ro`jHXnp$0+^P zwYyydwX49GQB#tJ)_;r=raKs93jmFcfBs`6uR6|<#|Ap~A2sFl3g7rkkZ1Y6^JRLc zCR26D0*eGZf&PvWvV55e4mv#RqM|v$$-$^2QO-Qmii;nVOFD9oiWUym-__)sJUi^} z_)S(Wf8c84=j3;5+t$PFN8;GIHJbe&t~Sgy+x_Qz3ckBtdch02Pfb&r9iD*`JFJ5e zdPdT_vr2r>R=6QR+&MHhauP(Y0|o$O2|b(TOQ4FsP{#sa#)kf=r)e#jJX06nWk*{d zf&vnn;xwRshL**&xsq`Cbsn^2s4XRKbO++{&*2y}@B*5LI!pmRK8z2fM6;78h%ADfnJdj_k9^6R1+ znPr{SA9a5Re0ty=k}iL3Y^8xJI%#H&X;dL zL9Y)+v|wHblA(F7P3i9Su^F@3=Ue~F)4zJnfOHvLq!+5n#3=S|syKzKWwkQUv)_$P z$?|Wj1!zt*;mxw~k1zoUSy{U)$OC={{NJaf|4JV*9$Ol3F?!=ow-|SV>!Qk7!;wSP zGLN}O+o7%zmlqun?s#WT2t`B#Qwa)ivFP|y_|-V7)Ubrer|6FWYz6$E;tHl^eEiYz zb{alQR4w{Kpm$v8*jf9e!CM{lHbS4>M#zYTMP*fx^b(QNMv5)?+17VszAc!r=VP=? 
zUrmAz=#aFRf>cgD`qrwW#=m9FK;ke4DQ*dc5Jx;sQZi^S`I3bwqcg06>& zL;SoAk~CF(wd2Xs zpz(SkNK((B4$W4J=yvMZE(=lKs4F+6$PKr~;YAw%+5OMS4IIpt zUK;%ES!xngcPt}HOJv_#b4D!OvdLX$=XqjN%}lqLk@~d7y__LdCgOtIx-L<*;~jZ< zF7j_RGH}p*AGU;o796EMD!FR*6A-U;-sRv$XMz_TE1j6_-gB0!V`Li7jj!6_q+jVN z%tA-2Af-4h1$g&>U)Y{D%1lj7R4G@|cL_|sR*=Pu=BM(w&{C%90BQlSe@xhlBf+~A z+uSL6t{FF*h!Ur~4SYmAC}k5r#%Dp948cp-#T z6w|7e<4^=9MOfSue;HdSqWBe4%BkJ<)^2`L&2nT#<~DTDGN+`8KE2tmGd*B5@4-^= zzDz%Ns(yH-8(8H`c37B_EGrB%Esn#`&F8z)5_O6#+L@Yf zf<{r|0vLMRD$dOoI z@wL6sQE+%YR%w70Met)%9s`M02@nn&=mS{yoSjmP!Z7Ztq&rJD-5 z?;m&|qp`k){-| z?cAbCbF4@X5Kvh$!Vc2HrX=`9*{Q-*l|;B5xa~DQx?~-21(c_A$JuA;`QTs+2(XNP z{VS_ru>5na@PfU2X+rCcb{3T@my=s8>z@)N>h}HnkvBDrU(hdC^@5+Jfgl9a+rikT z%85p10?>U3nTzwV{lpJ^U&~vUCl<7WLl`c5zR5hCVj3&3{uZO3j>Cj6XCE$!DqyXm zC%6wuKyeW3i;L_R$w&y+W6rpno9ai$+i@m)AIKk}Pti-VC^gJ*Q7o?7w%Feaj|~mT z{hUEOSYUi{ZU_=}lhPq=<{{w~#_R$psfj{A&X=ah#K=2TAH;gzc8l4$aKmR~ktaEYYcBTH&1HeqIB57$ zOJ07^nWDt7Q(z+=K_REi*hdHg z_rNCX0T(LK%w&r51PaV!+P5c=5gD=0LWU7BV|WcTQTGa&)tMj~%JEO3=fvQ^QrxR> zkSL=VWv7-QQPO5z+CWyR2Zq~WO0?-sV@sTJU#maK3StLPpbwG6mYkpflsWEmx`mhc z4(-DXKNc1(K3IU?Fqeq8@#`g6_-b(LcKQiKQ0f7IE2SMSE2Wr98g69{eQE|pfE-MH z#WFF>zQ%%C0$C9`%&*!j;kivdfdq{fBhZLxRU=jEyGoE`nqiqvJiI>H@Ag)7QSq{* zyrPW8hiF#<3OOlVuqr+O$q9Gqkg8-qL4lkOZb4EwJFAU_<+I-;vLR;U3dE+u1~6Ti z)}`c{(}K6Ia;%;rT)byZ2Tp}@D+ooAEBkTpGq4PfDKGCTagJ5)jT~*z<3*J&tP$Du ziYsA)W6Z?c53@znRrQFo#vz6bRKRt7P7*x2xp?0;I>k%ea-z0@y9|<78XbnA8VE8M9T@5?E0)@9C%xK{t${HI9tyb zC^7?2f9G~t>iPHFQTfgT8H2t1oWz)Ik1q>vl+_vnP-u90I^S&}F(Z7SEEH$NT~~=y zabWHNVVnEMS~CkBCi%`hE7GE@RLwv2xF{;Gy}6DQd^kZ*%&N zyqC>dtoazF;cZ56%FPFHyz4x4Z><@k&o?B$KWdKDao^-ggIJL4JaSN0FYNKS;WexV z9=&BbL`^GUUQq|rkqv{9uGHqBQ2qYg`%jHzTA?duaR<$UEMsG1@!|dFUeSr`YNFn^ z9GT@!3s8(mVr<1eKbZk4DXaoOUQtP#^1y6=d)T^l!hgMQ*1V_rbY;jGU0<_6nmW+H zZ8UaYv_K$C%Qqg0LjVaIFGnTYywBa#k=z^1nj6ce(5?AxMF1}6T^~rsEXAIo`(-0_ zb33I17~f^cuS7{nv9S&2G=@#d4S%9J^`G%JZF!$JaVMV@d?|vzAvWz#K~e_EmnUpp zpggOngu(N;QbiLl(4vW5A(cO!BeX*>c~&*dPQH@6@J{2%b@g>c3Ymm5%09Ws(L?0f 
zv`TAt-Xv^7HnVZ7c2W17AYsgZ*0TZHnT#K2qv~7W;RmG3((dZit$QpCtO;2+ir=kO z>MHi>u0DQf;(6$39OD_7=b*65KSbrV{_K$1O8a!SrNTm(oYz6f`A~YX5sl*BSVy~$ zP+0P2oP&(?0ThLrU29sl(^3>Ut`g*>TI#GjcMP-5w-A$F%+1n`ww+9mKEl0CF_xf# z@jwA_w{B6IQi!|V-T^r-PZx#qPv_{As{84Bz9GcHJxOXQ=1LXrgcnq^b<-nVy8I7d z7M%e1iIW1Nd>#@z5S(@nUtIcp)%C>vRyb(L;~hV9%Y&K?qyxXo=sJ>%N=6fVOC z(}tIY740TB4!ts=+NCsdW0=QM2SKl^q&0=oRpbl}U|`}ar+7yBgBF_BYpUjvsiw0v z&iYNB$lCLG)44Jjq>AD25Vd2gOtNITqasuu<1)e3)^M?SXK3bRV5f?1vpK8XB2Q?Q zPK1CTT04D%j}}XgxyB4CBe+Pcp5HRMw)+MmdaMY15B9}zUBHp$&ajP{?lpI z%42R=E1f^T*0I?Cz&HP(zKGVa%;uU^mh7Utli=eH9;_NA4g0>Ds7Gj~jwecP0jez6 zf{leyJUT7{P*6{+x9Tk3z0^t*bJkc5m<2(2xk!#kqxx#uo-La|NWtlKkGPF|ZlpN( zV#Xfu2B?M<@@$(##41%on3o*hCUN5jDzo)iR!VW)XfkKst&jkc9J$ZTZ$6qQ6{yME z@lyTJ{YKqe;``5#nJ%lZk7Fd~K6f`i{qR#$&k00yqPJ?zwY%FN=NB<}jwj#Ic@tpJVt!MX)=}uBto4?6TD1Dd^ajAl zAd+2kK~a5|MDz(7k)5loHrJ98t0cl;qG2gS>;3*`U%R3iO-;lRxC-?!0xK6~?Z2y4 zvv>D)G5fZZNlj-Z+a@{$F0-FVBlAQRnrc!A)C5w#>Q&+>a3FH9a;1Go7f-BGN#-|2 zY7wLQI+VA(t76JN(vIPbl(@5N(pswQGWGoXZX;0wWDNctz+EOr6zN~)af_<^0h*Ld293*PaM$%O`F-ZNEnwfHYLw`)v? 
zjF_HPHhO8#f+xxSjpky%#B)sodi$p_NH_?O4f5%^bWsNkZi90L2ZFShC_w&4$NxP& z++x&BjWX4Xx5P5Nlb3wnZ_gfp;>C7um5I^Y>;YoC#|(`$W+u4_vb54x%@b|mOU4G4 zBE#Zji2%_BoXeawz#<2%!7IchBNNL!IH|(zglkjUETLORKcbMK4g3~_v#KVPUkK`C z|J?!PS;{c4ndqK}i+WlejaHj8Yo*=7Fw~$Zzo@<~lN)zW1u*!jTFZCP(6*Qn3Eq-_ZFB$H=KgM*lNV^&OYG7nelTbO zhl|}HxU=fF>K}N^VVxG=l2L-0vB*JDiQuH-xb}sey&*}=n;W({>z_XmYg?I_+!9iq zP6|*y1_|MSVf%hwDP*zqUN6RW3(qQ|0KdWwmDaUR%&s50c|6?gCNtyP_bJnrG)dFv z2#Gxs9i~UK!NQiW@}`GyaqS^mjZ` z|MzL{Kgw6WTLf&vQ^rIBo6m6FU?QkcOq{NC#Q_zxx~i3o2A2pj+D2I-z(vrJ1=C~2 zQiPa`SLnz2YZ%pQ8C3rY+~Gm_DqiLY)!I*eU0DdCz-+w0ecry_D!Ov|X7J$K%Ov(0 zL-T~X5YbZ2{3PooO(G1(w#gsul{eeuVgcCp%1x9|k160Oz0((0-M=6K1DYa>e^$2# zev4K3`Lx@2K0TL@$Ln64-Nb!gl{G!VT-~M#n|^7#uJK~-d1KQGZX{-EQ9LM{hyJd& z5T8zoY`FDQPlrdA_Uv+IW37|rCEJ4t)F{EaO4*Qmhm)>>7e`op1)0r!5f0Na4(^%- z4P3W_zZKSLs8OIC-EoKoef3$S1Z{@plrPMe`!Wr+Y4>%TxbDoCdY1B$hdadr>+)|$Xp`=x*;?Y0kG6SKmj99ASV2AH!n zF?5H1Y*yrSC1E9igVuXC)P(sm4HcO)X$F?rRLE&RV6|gtS)3jksQ!Gqm-Nw@8Dhw7 z=&Kr(3rkAkhFVa73ne~uf}4rVy~%5bTgPrnPL2#q#3E1Pe6}S#E02qw_OgaGooBN4 z=nKD_hAEN&2sxZkAs3ctDN1{FH@(#>W7pz5N0-sDS0QW>(eWC!qnEDUjK%nJO$aBn z7PO64Nd%7umu{>6g1npSSwn_WXSKtoH1~V$PXFEKY+z@Wl+!u*IH!J*0fbkhA|LTy z_%{+byP;yYi=sEb$H|^Ww3bg27!zr}+j0vv+A{O;VY#f>0seNA>if811Dcg0b@+dXmh?2#_PXOTIYT!0vU_C55 zNnWqF$pgC~6!LZTOmn861eJ%@oFgrNpIWx2Cd6L#zXwmBVB1De->^`*g39A!^_ zkcc8;&gR1ggyN2U%o{z^<*&f+mc{8WYmJQ&rd%$ap&rasLBkc$P+>F249`(jib7Zw zF+l6kaM^&NKy(V;);D{f{l1#TM=i*Z#TgG(Qj|KIppQjoIt^iTBb}U)ds{3Fj#T6~ z#FYlJ>fuXayCzI8YmL^}t;Opqu~DgNWH<#fJWoYB36St?hE{{Y1u3hUL8#c4le3x+ z{C~At*xNhy=8T^ieJN9W-z!McJr+rw}w;c7sc}YK&FQ^ad*bD{>d;K_6hqRTsg$A=xo$-@9CA+ zsn+{^)k1~{o&pWQM3(l@VG$EyR*+>}45bQUfl~*cgQ0Yl$<564lboqbTk{pq+lz_H z@Bp+xGl~$zp-`|~Y==kyet*BnUhHbCg~Rr7xP3!KS+y|x7J;W72DdX+samlu1Mq{8 zM~<3XpM94X^@G!bqI%MQyoN2MmLhTgJ6o+c*H^j6IX>iRoFV1bwi2luKpI=d~ zorYc*)elO^7e4ktM~W-N8x$vQ5o|S$%Bv| z9lnXr`pX>MR{k}~e_8Th?)tAa`LB)V|B&7F4m~TWNx8;GCT3%IUsnHe`dFu+S0vVS za?z9HO?Hcl{@ZrboT>rI77PJKsg_+7rdHGX{Mj2;JZwd-B;Y zjTk^h9A$W>SR#^SPl6$+;yAckNmUM)-aNG4W%k$`+lN#%nErMK{}A*7FR6t^Dk+zl 
z)sejE8;P9%i*-Ioh+oF!%ge*??i&I;r0m>>wl^7%=oFtvk5+P^&vGhLL#2)iAhtp| z%uNKZ9vG5Z(hm-GPdeW_v=DOiD3i>4#m4pQBCGPz9{k2QPp_32qg9zOL_{qZhOD=OnSl91O$WMn;<|r5r@1h4BL%<#@owX5Y`RZjS9c>l6+1 ztm>L7iln04=xPe~91}(_CyXp9cDdu=rUdfB;LsErsaym0Moo_>o2XQt2O4-U{LLeHf2rZ4|Wmd?YEjFFEb>C?cLI-%nooSR^@ z7SOz1+Iu1=h7HtGYX}Ua=?G=M3jZR%ezm+gA!PK_vc6vOdzYIuu5y0-esn4>cU`;b zF*Ax=Lk~7aR8okT-!2Fxci(-$XKliL6JxcO0}rUm<(K13i#A@Cq??@cMi2PeWsikE z58HC~Auk54@^Au*41u=w6PK-|+DVctgMsuGXA4ZJ7QtrYym8(eG&&nInc(AM^2vH# zSgCBuR-Ux7&8pmO&_li5E;;9Wt#VG}du4X1K_7VWWh_$d39E_2ChUR!pPNBUvwe1_ z=>%U-x@JZecj6Zh5BIx%gjjhdxesRX_rVy`FAyRFCf>G&c>qttx&nNh#c@3Q+*WG) zwuwi6)*yN&ZxruLL@8z5P!=WRp%&&m&+AS{(snGa-sHnq4CMQr);wu+JnNRE2r4^ zLvN7U-l2N^Cyx?`eihy&RO4`RGT&`$Pvc0AHhdBx%1uLF94wdeooNwR@ai>+cEr%# z*MWkufmV{Ibdt*cCAm&g-42@qQj-);WoAbd;r!k-xYhV}_LO8u%C$%`hmWB-2bF9; zdeH4+;#TNJ>Q*wIGCA8p3q2ZCz+1)d z=Ox*|gS%bnKrf}!5kyq-^7j(i+&Qf_3CMpp4)Bq#v9+A6sy)Dhcw`(#4bE3YaT&#E;W|MjD z@YE6bvi$I^_f*?A!`FZmBNl%L#H%<(OEY^Pb5qV18xQl08_cD(z8QK$|6sZ~o_mug zuBwslY*k_2Qj$`@Ml_1rT^i!c=wWzk4-fCHN6zg$k!8hR3o3#%bbNSSzCF6duU)bu z&RZ=KVN69?BtUG&ja^@W^(Q7xor*8)+8qjoW|b1GUO7*$3tFw!OnZ1)k7}vsHC$pF zw-{|>6Q8*&p}}KieZuY{c+q#xC!8!>%+XT@I~_MsYY%op!wz?!trzR2e$`diOqsG~ zrAn`-79&&>35_F4DW(L(&m z9Ekvh6L7@dDXkm{&z#6^z0HuCFT>GEYA43LU&@|C9paloG9+~5G(`ncx2b499+YM& z)7hOa(#2Z~y{iABF7v8J)ss|#LDO2hE;Epro9^MM_P&Nttbq(dbFEd7q(h6h!HFMb z)gu>&Oi^hC-k=cNlT}5#giQN3CV`3Dg~Q3ei~gn+hi%`A>#CX3K_e$|RxU9< zdTKNlko^25vI1Fuk!keiSwGuKm=Z1IWN>IhJ)Ehj|YTv3;o4t zgX;6+gHG@KawCd}+=;@9(;H1>dmX92E153u~WFS0$_HRW+P00w@W84Fh+y^Vpxn zZ5qtRww+g*({AK!_NO=!owXDFWNoMAa!40Ftn?OD-0~>nMDgO28NP#@)g*l4tulVw zUg}9`615nwojPHW-G;D~+!y6!L-cms$K`;g=am>GeiwlB_2n4TRwa!o~1M#~mw9$RY8yJq%d)1u%OX-y;8 z^3a+mOljOEiP*2F!10?t?EnQn>u6lj(;bFE{=0<{{`R(urauvMkuQaw_qK{XuWMQK ze)HTkJ_=vuw~t_>c?iSMvQfHzMOYIDz49T!bJl}J8A+$t39TLr(t+d!QVvCN?W@i< zq7_B)M#POvh~uk<8!nPp5|2fuq08k2qJ0?wd|92mXHncVK0%sKb*JSJFqAPk5FHyu(Sm@3(YEI!3A4@=5wli_@ZNn6C0EMhvhyl~t z?|}dK+KvC_O8U<(UAaE}n)DZ$?o}_}3f9mr63oVlnJbC&cvL`M3iu_Z(#nvX;f-N= zXDBHlsOaBLXIuUMMDtWbs#B 
StsFA*^K#nnXn{ybpV_(Ly`TetC!4<|*X^PQXi z6#^V+XgPS0DXS`T>$u77d7G#tv;J@t&J+U;xe#arTczAu=JJ8D^UUYov- zVte!JRFVNxg=_*OiU&l3oD_VQ><38e;5#w@+tLHzf%N`QkL_jL)~B>Ruhe?dB9Ls%-((Eihr+7Si*UTPS0LWF7G;s%bUE_4{JZ1Pml2Qf z;nW}fCfgHzCZBP4O+L4=@J3VU3pj2Jt8%P3!i91TMX-It2gz}jQHMIqx{%9%r1r_c`PJZ?2JyJ8R9A zHRrtNx_{RNvuRIF*6gXlE@LPrE_8Do$S=$=C&hBhFF>AMRO-^aUR#k2)Y-TJMv(l# z1IiP@m;O3fgM>009~8Tz(xc`<oDy&RGN5>2wjP>IeOjVLIe3 zBrC!r$gtT+1b}?AMhZS0LACpm|Kxh&7Z%J&sFcEJp?5s$gd#*wn%q=w5hWm#-;SYu+2d+_Rl#;zeUy7J~8vcB6_x}ntrcqC z8gF|Jhdm|nY0Hhyg2HEmey-ng-MrVNSS@$>%Zs8xCQ8u*O3{Pnn>_kWF?gbG?g*EV zxjC-Dm5Jzt%Ec&R@2Qnzy>IGZ@5`IIRmC25w%>f0K8$>8Jz#M(mZX3@u?vudmJ9so%vfm+PzxBrI?iN@u&qet<1<%n=O_*oJ~kXJQ5d zkC?clX&W=(kk&vE*%TlDHn#6xRk|GFmu2L0`ZW~z88_UP7{z|NzHo#@L6q~om!HGO z^BOT3S~!s?5K~2Qx;t5Fv>2LFjN0iRlp5t7la5C@5r`fAvshnOd||_Cm}IPaZ+e$` z+hy&R5N%9qP%dJzdz+##-A%f(P?Q*Das+#zJip4396V%Cl&rRM+MS=Ddm)jK=XlA@ z)yY>8OQU+3)TKxbZ*~# z()Ct4_RRf#IxyWO88%J&vI^F#LbU4)^$-XpYRAaiNd>u_9k!Y*8|yiITafC@(X*WJ zh{FpryNN5Gc|vV(%Je}q(9w7H?8lNd4VsR1Xu(5hS71_;vitSWl(a7Jkj`SuiBw7Y zI#kiRj|}miM0Ui%6CUMI2rMPZ0cg9O@xE1lU8$~ZVxxRV9VIqLF`X>^opIE_1q%g8 zkeFb3Y?)XJdY@mlsnR8O`Lwl$r*`=+=2n_XgR#+&`4hQx0me0#V<1_6*%?v+aLK=k ztj(XtE#VX2AHYUE9z$;wD4WtYEX>SH))|st?4tY_jKcRxgCglIRFIv9z;C5u=L@b~ z{V*#Y4yGIthr7R-hxK1Vb`lZ-hRmTOF5=MA$=rOR<2Vu_&phKm!L+DAvyzr9;CH0z zra9OZw00PqSaXhpG)6PLpfpq$p@hvSRBM2|O-Mi>(Tm0SoA<`JV#iT0=hQnWo05p%xec>y}VoTsk)$xYr+Cg{1`vr!xnm+|LbZlGUJmti(o(|}uu&~WC_ zBWYbgLMHq`BrSl9?sA1nV3E=ca9}iD z=`-t$ua_tttXLNAPsLO##V=4ps`PHQa1(kc;INncyNN;1Ui>$S%5G ztJ#114?s+}E%8N`r;N?c=1KE#cyI4APFW};>TL|y0#Okmdaz~>2&z{aK&!XF6eE`OjuGUIUjreqyA!t z%nVb0M!sslk=jBg@;h+~UdhVYy~_}J^L^cZPCJ69m`c{<)9kE{XP4fbMAj~T)352H zqe^Rp2$PYyxvh_U4s3gYSB4sz@$&}*vh_I2gNS2U^U#;eh7PCEFN)l1!Z7ysOjFv1 z@BGF0u($hyc?+Au<0^|XR*_?BfOmbs+R>=~#gPR=8wnpiJ7%FL?=&yNOl)@zh1hbv;C(Jr^mkh?Q-;Wa~uMa8|QQkJgOME zAOq9231N1vQf}3QIZ9ne_UyiqjrnUHDQ>B{{i=y3JtWJGQV=t89z5jEBJS~X@`#x) z&}xUS;5bj}-Cfu1UG(*K*`tfa0`kOwx6m3!jl~kM2y(opt~|&OG01}D9RTv(`3Zwd 
z&}mG4bX75ybT($&zY*&<^`m?p7IZ%)Pa>=iQbC9ycwFz)_o_|sfQW+uS_SeIM|g2TkjD<#+RH|8iSa2-|-%3 zNm5J5!aEM)XT@@0vw%>sxPIhhQFg%;l`d!zO5;Iax?@|gf-Z3soybIC9ig}O8Wk8N0lpc_H~^oV|^#iK^qa53O%Y> zq6)SL!e_g?(&}i~*umGWeNC1ZUz0ObJ}0Z}o8B2-QCc2*3jU$r2Ka|kL2aZ3DKFY8 zdc4&!tW&Dkv&>^_1{)87xqIkjyDgXmwahXGOBLp?i<2xb*kQ|#aPpQ05um5qsYl3iR;Nn&8Es@Ho#wdw{7R8xGnED7l=ixwC+9i9Rmjx-_>4blf~vC3=Y(u zHz8)JTT4+1Kg!&1HPzfwD*H?O`Cc)K^rPe9_tiIl083?mJt(~SGDqgxaY5#T>E?U8 zZ7Hfjr#r%rB5a;{t%MJ3=HohCm#))4RMh#x!jLsN%*j~;?tI3I1F;!Y#t#P0GU;5r zyg7gO*MQ|eoj)wvwYdDoZ>NBlciMoE5&g0tY3k&WbTFA{s#C)tWEP2L zV1%}nV3zWJ@8mbR)p(vNkh8673>C;KxdmeJbFz33Sm^oBJNp9V;7~DZdFsJ*+35uj zgVgq#iO6)z1OLI4z39-_(=1o%zL^5RD|ku;&;Cv$cSDKyn5X;NtM|?he|g=#NYVGD zmCCr@TG0fP=A3Cq)zlR`-Gx9OhlL6aI}9#HozG)_tNaGCf#Zv2`E&=2v-8j4?UoubtvjM zZA2iKu${n(j)xN~2PZdG497#Nslpd{2VaGX_jI?csNR#HD--MI*R)~_-He~0oLxEN zTgDyFhR)ZI5BpZM1j*MhdPa^!T;kj;O?El%?wH}dUa&YWoC_B>F*Koi&jf<9!Bhgp z2jL041AhQ9H+RTQrctGOV`1A21M?1Jo#rv5+VSY|K56fpE-a?JQ_F)|Ftei9x-dw2 zf$3v2&UL>X{v^kboWFaTfgf$>Q51^0k$PXAZJGE_U<>&HwLP)F07&w%7g6$G;|4iT z-^Sea>$VO%azi~izVkZY7Ss~lcHwk{iXnSye8ktS9OtgykV_I?);tH#qtzp>ibkkc zI=!m{!y~QJ5GA&GPf=n2F>d$uFuM1kv?%N9a*^J8}3}l$XDhCaa|| zXxq%K$mrXS45b(oVmY}`VCA`=x8sJrk|0ITLTX=&YdN}6%mX%xXjZW-H4Zg4)iGOC zlRi(LWVqifHQw}N4rCcGzdBqH{8WtoRoIf*Cq@z--t&={TwK7f1oZxT%EkMn{0>!z zsn?-E^ASn`Zg43IfpWfaN@!hs;kVL~sLuYY0?0?lzVHy37;L_xpdT2$yPWGpDOCzf zRHjDbQU>Io7Ytf|-%pim9(P%p{*XL7hT8dQ%gDw)Kn_e}%_m-mk>^u%?DM;Zg(W!# z5}qOVN7PLcE9dmI4u&Jhq)vvW>7Rchw^KfC-dMEj9xqJm^`dZ9GoMu~mB8gsJO(G4 z5fJOjAH*(tH2oB?T5$EnZ%@*o`1OVk3a%z`VOL_51IqmM17&xB)ooUEsgX4iB3cE1 z*JEj~dUEsr%SX{^4@Je^YDsFkm~LrgitSc^)e%?DF63W$P_k6|tAlY(`-|&Fp5B}M z%#H;-F0D;SM93}e@pW5d_#B5`_U398;}%J3gRvLD`0OCI!!m-k#^;L}s?98rrG;cU z7JS1!xq;QwPX}c;iHZ0mqJzy+bJg?yxXlvJX7*qPl+Vznf9T`*HCW7n%W@M_KQAF- zNqU*5pP9zOPrzuhvq-%QkKIgg^e%G(0V9_#3>+95zK%)8*R-3oljv>SdRC!P#AS=- zt|j?t<82snWd_Q4vD^Y;8BLgAkBsLV0!yM$59z-(#_bR38|W$LX5ULdcxxM3!AoJL zW8(l0$7BG@ufuZPsyg}V)yhJ z!R%MuYA$!JTM9aY3M#Bj2G*0D2F)&XTMQCHm>23)F_$;aakDLiDxq|uK+BrV&^=<~ 
zym8a>>$#b&shrD6^_h*VN<8elsvW;PY^L!iw87$Jg(74*CIF<4-`*@rHPU4C9U;sm z`qlF0F^|kTV z-&V3lqq5TD;pdmGXhS+k47DA_MI$1pP|_@suNl-5abr2)o9W3&V~vYvF-F%isGfK% zASg*oq@VV^&Xyg}5f&E@Mw!J)nR zeD%x$Oi4i+EAru?-}xFTwoJ*Hszith(NQV#WY?NNZe!Wmw2s}}zn!WV^7f>z!-T`P zAqJY;Wnb>u4b33s5j@e(t2rJ0CJ^vu*-u5nS|kebf(fx9H#F!+<@Z#Q)11tM^Sqp)uU=@Y69rbwT9Jf4H6oe`PyMnESFRhVz1Z#Da6D)DP5?X5JQn{#_E(Y$Dn_5 z$s-CNKd-k%9BFlwI_)T~UG((I5e;NT39@-?KRY3d7F4IfQmX+L3=s0bYIHSQw9|#2 zzq<42W!3xay76T+LJ25S^JXh4iRCx?^>GcFgJzHr8m)f^C zJ`k?2rj}{%7u0u*AiFH0rHR&p$|t$KXKNdu;?HQ7$uEH!7R4BuD_l`yDp!3q!Xx!Y z$5!#|*uoY3DDRxqTJTPg`;9JDpcq+Aob%{7UV^4CmIdGq%5%adH%~8RT&ONHNP@mh zi^+NVtX-`Y&J*wBE&e9NY98zD5|5{ZzIFymM`8hT%gXb=?46U?TdkKRTu0q?#G3IN zi8e&L1g%N}Qje$;4`=EYk;W1Lp%;2V$s-uU6wen~mZ>JoEk9E77GpP_M8?)j@%Rfj zHoE0H=hd9Dh|8%3wJFOUNP(D=0s2KT(#%J!zR+$tW^&4%w37U_>FbX_32#Jn_z21? zk-fhga7au5Z7C)MG#p0VRa1i{Q;KnE@OXtn5uHiTFQ|&ez!h`O&c1o?@!Ud>a!4_q z8M6?om3NpxG@w{7N`bQ!H;ik;27+yfe*hEh;;Orr zDm+>}9ZcTSP>_d*$$?&@nyZ3EuxMKalfglDt{9}Kl@~z^^uIiqBQU0OloO;aCx+u6 zuhOa0Y_ob{%smY99qjRS6`iln z?gQ+b?_yu+Nc4jBw0YeI5ypl*ddAEP>OlqL@Z{@5myX}>$==RaO|&a|By6pvl%N&zJXqzR0sPn&v_ zIpGQmV)jk!zpXRsK9q_&E{`x}t#VBaH;m(^9-uYz@)5cDQN$4pybD1h%kQ8n(Cg)J zRjdjAdAVaTf=C?23Pq|iZTzL@#O~=qTC-1->KqK-j zB@%XBQ0Z1xZRpPgbY7U2gGdDg`pE?!$56sb7?`Axmp-2hOpQS9f}bvjl@z3=Wf}Oe z{p#}}uBG60n|inTa@uc46Ss9m`pAIu>v{2q2L>f1#@==kkLh`W9N(TV@{LyZj`7&` zlE{0Y;yo?!tk`4Y*$_Sn+F0pl0RVC| zwG;KQnFO?Bdvw*!#E-^Rj^Q*}^>fZQSfQ8Ts2- z7x>Ir0ff$GCQ|Lgq4r18D-quQWZG*~hC#>e{Jb{lysztLJ*n2g*HeL%FbH;)Q<-4| zJTMhud1Ds2fGC`>9|6i8O)gZI8a=_vAT2f zJI|oq(I>llC>NK^_?FF?Byk|8<~dyq9DB}sp+UF#!u^%USqI?W_KcAePScWNcCjY zJ?`ghp3VJa7+6!;lu$M_TuajN*tN039rgsc#w&1{V&F5lW2t9_u}vDRn*+l_Yc2B4 zrn+RsNSuyGzqq>r8xx8aVAw4#aWw~V10o(2=NwIylc%>J(b_em;;H9$4t^^1_3n+? 
z;cssj8m;>WG+R2k=tQmp;)~VkU4wa?>6S`2q?4aaaliMDwQaLvSZwHj(g2gp@z>3i z6|$-~mb8_kP}l>5b!GC@W}4(Dy*=%0G<)Pc;OSb)wKlL$F^FO@^Niog$WOxTswQ52d06rY+sR>xsj#((`K(e>tkRn1WUYrO5%M6bot zE!Bs=h_^1KeXxHunKHO*;rN#%^~qn_HMUkq;JrW0-kywEj{S zRi3S|TR|(j(L5iBdVQD%#e3HLV5FR7R0fgwuHq|e|Kea@C~13Phx~6s`+p(Y{BJ`0 ze+R7o{}G{GS;g=Npwqtc6XE%LF0#t9vRdI6WY7Nu`1FS*6}H*y!k*KVKq)Nhyvtd2 z`YEru(056(Gs~hYmpZkrKqeq-Xx9&=A)-ac@hLaEoBzS2bQmD!;T<}{r7`=vTiP!z ztlT9kIY!Z%TGw2TwXYv%$z~9;-o@*F<`BD*-S%Z5d)sA=B)iAuYm8bMJ2@q;s>yU4on5|L4V&{WE0c zQHk7-K@K4~3C7F5#ozyVEr$OLX|bf|^(9$K2W`oFed%BJ4`Jp5uYKyjyXT*``fq=q z|8IAXIlF2KL7!mlxX`3o!#N;BW(bwrEaSat;qIX5Qpl;Mg$B}^HcIdo5(z4ZraO(?-up?Vf{f1e~T9_Xr+?=xF(KK?$vaO7$gqqF=^mgN5y*#=tt zcOn}99kTr|$nw8YXa6`5uk3v|Ep;LD&BXNw(Cp+r_xCa9Kj!iO@>%_#!vAYmVDZYf zKY*GW*Z%-6eYkeU6I5gSf3bkLAmbdj=`#Wm4wK=hy-l2}3PQ8N->`b>)>Ab+^%u4% z<-yhF9MbtCDu`l!5q{--WIx&Y{;~b|!@<>ag9QIgmTQ4;!+v~w?IU?Z3*DEKvy>U) za5YK_fS`O&ORZDs9RQf|iryjIYHKd|0Ameq=-4CqM6q?Y7`PYFucVCg;?+BY*+wLU zTzJPP6}%L{!(cs^^TQtr{g$5%2EA8F+am0@(ROcYO6^Je?QD6c^!f$GS zKkz2zOaT~;(4#aeXq?w>b^Z|k^P!Kd!ClFG@St*> z4x*B3I{$S`4W*3aTm7=Co$iIH#o%?XnV1O5u{QPlU3=)EM^BijpO;a`H>t3)-f6xv z^fS-TH$zg1|MX+WMxOn?j8>2`bkuD{C#NWSJX5hU3eU8C1l-7vQPMHZF9S>$(>nQb z*UT)$1oY$YCSIX`tE3*jLp#`UC1N z3@;k}-weuzuN9Fgd8P^LG#~}++wly1Zfb@JgG*w;_36dS+6@}h+k`OC@;3! 
z7!=4#1aiJ{>`JmQPgdt=2xBJCp+vCDbe@SlK(ry@2uK1Qe=TbRkdlQAmJX-ST_ zOaeK1G{Q%eEjq|(q#-5Zo_E?J@rAsh09Yabc&nnz-0mUS+Sh?0>L5B@%{QMQ$~Mv_ zRz_nx&LY1+2u6uWC^BTS!&|U08-bG;i87>R(bM}p(4QP`XOX!;KTH@>k=-tkVwmx* z7{fb7?bsk`{S>1CR>n##ICGi337bt3($ly=rq_hT`%4}du%^d1Q3EzTgvA!&IoMY- z+#8L6jp6uI;}%UO72edmM8Qwb?XvTZH`;fv=r14nH5MEW{Im`$e_)JjQl?>@hxAZy zmW!9WiagoMre!NHmz2Du*fmm{!#wP$h1rc0F!MGC8Lv9@QMBYa zd0(#o0^Hq$tXcJE%AA}nzG0PcU1fBAZnZBJ9S6reGPbA8i$jwsi&-pR!8P7TL=}Yr zo1{^7<~#3$T(Y^wu(_{gKGrgd2265GbvgzV%GtO90L(U8-BEbaLSLmZhmsit9u;&e z>Tzq1`ggq-Jj&{O@^8uGTR?haa=4r@%@VAzpy5~9ptC6R-*Ys&v=SHp&K z`}rkk@p0%3=C!MvsI5(l>)m;S&Bl9^yWXKsQuiT`fe7VpOA#1gP=KKgF<^wIsl8M5 zVUwAr+52&u*{@2Hv&2|rk3L641OB21(yEsn^+Kj?XU%*aQUT$ajjDh2EUo<^iJ-?TdSepdRypn{ceuhtZ z83kHX(^-HPQkR*o))s|e7(BbH-$Z*Zk)_8%65{M%IZs^cnOo~vqnbZEA{0lAUgM`X zqjVH{cdAD$tFF8!NY#6>wQ+nj;6CbJadk&Zg*cSd`a}_QIa&AD2y*F4@oY#A{m#Y| z)Q(wJI>eO|R2WaI24H;}%bp&t$^k1*>Im{91(V6tP1np3f@XrKlESogL`)0UD@eG$xqsO+BK_X!(=Fe;*^}KU5>MSHw3_8w0Tde%_2ua zCJbnWCJovysazoh=Y5~GTyH#y&fM;#xgC(M<~h;#(k*H~Q!o=*&>1vYRZR2|amkKZ zC2uQ(Qm6!p1Y05`=P)+#T{ip1jPsve-xgO{NjZFJobBFm3>u9ReTnW7OY~|29;RKe^=TIKc0zd+}n&CCu-40R>h?}o1*}&Aqo6!2;Gq=my*RGU@LgjgbDKPTBgI) zwYcNEtu)Iq#z!zc8%i7P=R^1@w=^Nn%h z@GzH`;1; zB*pTQ51NY{mTHcasEm_WQd2fo(oEsHW>WYvav?_c!INe=%#kw{`R~W&0dTvgn0+WQyL&cRzXvV5Zw?{yFoMqWvFAhXTND0vhSt(e&XYg zspq;c=qxEQqwc8{)&>DdX>~CWag{sZ@yc!&TsEQ*Pu z2h~JK70=$5=YrU(`sGlg1ig?mir1zbK}c8zGl|D?W-ygK8ac)zs}w$c$5zi_=`v^8 zoYiQ7dj>Ag4jrnbd5Jyme)c~$v3 z#(hFe#Ldb;lomKGko_k(xyMh z@_?BoN=L*sp0WY}HXbnUXQrgT4-#(vZv(D{CC~4);MT9$H@~XLEo&^S{l9BYyxSzd zd0x!`^q&5Y>i+)$A2HLvaL)hQ+{D6S@R!%g-q(i_56f3Fz2__f{{U?7Mf|?<_SOM> z=fC|f8+rD47Vp*11x@Wzw^KIe4La8on%s*`)y#;wJN(s44e`etSJRpWLv@5}>$GBI)?F{`$AG(|?*^{l8#r)nfV5p`rE%wx!9+ znnzD;+>2r|v!$5$wPsowX}&Gn#0ovn2|*x;5T4#fy|r24cW04}e*o>R3f~s>nj~ck z#59taJxvKUdict^cb^jqUiDEuBw2Ged;dMzME_^LYcPto0cX|t* z!@2tXueM7SE$2_%Yrb0s2j~ZqKH>*nRo%J_c=AR0FBo2zm+e7YpscFv4e-CD^UN%I z^_;%m;$1D8mUJO5W^(ZA1& zQ-62kvRVGk;@XuzO{jI%$RfNr)_GvbA(=HES}1^_DL0M?z>8JpR+Vv@Jp&_dtQ%+p 
zH090vCuy@&AN8BZihQH*jEFihuwSjn^vp>+D~0BC09*%3t+WLL7@qWqVf|I2#N@{aD@ZPuVhvPv)(g(sU1PueSdWNKvrDKh^ z+I2O(M&%Xk*US6)uhAFLQVR6@b@ug(0RV^P7nqHKv62z!)|a<82Nh3N9p>*=B|Nok<%y+X4W`LV+&1yVSQNC9iWdpb$)QQh(g=73b*a2aNfJ~vCl;Z zK&+Ub7X|uDpg}70Be;0zO(~uchRX zhac*~`PmgL$3QDsh-UlVT0f!Mlq7W5w71pSK*n!c;|lvD=R2w|sscuM)y|T>E&p^q zotey=R>SGynUXHe~=w&n6p!=1yG;FN=uPF2kjKyQbB#5!n zsl%gEcOmkB0HXT%cP~UThZs^QHr6EqWSnYaB`^>K1!hesL;!D=+M_1T_^2&};VND5 zl|{>Y8s&6aL)hqqJFW$zV*9gp3ibn1Gc+@(LNgEnK!Kz-W{^NB%F-m&vLiND`2D6| zyl2AkM3l);--QQ$<-Ku*!zupu`%bkP2XS%TjJQ7lqQ2mdcAnoh$`ui&?)n|W?%g~ypj+Y!AgcJE)NC2LWsfMeaWMWR$D(WmN zNiwDOHhtQ~<3Mu+z^GTJL~4VKKoPQlxRm}%_M9y#^p(3rh)t^5C0A^}$F8~(2W<`%ZcZ*S28k{OF4sMf4BP99b5rd0kMQjj$nx~1Co}#Q>ni8@|xZLMp!Oue))j?R=SSWy&%$XSgxP)(NTM^#o z$^H7M?^JeGw-Y+s_D(9YQdqW+c}lDshQOM~EMrn5zDbMZuWkd$aM&fG*KH;$&z{q4 z{NQip?BZg~rn_ILx~G1B^VOBOzD=4wgI5mA=Z7^9eR+eLD*_yD=Wkc(+QlA+MWygH zr|f#ARfV(}*b$=^{IJV#$t!Hk1Fu%;};|%$rGk0=0-=>&u={t$Dd_ z^}e)5$i6&$Xx@{Jeif?{li{ZL>?HXt91p@B1Xg)>t-vwk9R}R|xgdV^(sW${tvXXt zA-Iqa`wudY?Y-6kqx%kd=CygQ`egB_(ZIzPE;>fN6VvsG%S%&y9cm+9{+&PKQ~e z!%9tJlWBSy_9ruTD?ZhEy^0_Iy6nH<4}gz{9g}8kbBi>R{4u6V?_0$rUaqtveNo|{f& zzx41OylUU8)!?#tl#|_~bwIY4Y7w#u1{{j1^9Pt+lx!5H*bKW?-t z4X`vlcA2y|bOJI141&St`~ci`Q^Z3HQfOgA+)?ZfiHyEpC@Z`?}BFNhT&_ zhzYH_$G5GRGp{;mt1))`#ydPh{@M-;>PPsnwOh92S_osD0l_mSx~Eb#-jb zH6T?23-80>*km(RE3hP1Z|9XD#R;yb!mEiq?;4WbH})EpR{A=!^s&Vou1qxKWleN-_I{f*VOXlX zrb&PVFc#WS({ieoy#DO!?S_L)&j$7*efDEsUTO#P)%c;pWFmJyQT16N_I$bm&*L-l zwh7beMQVSbsWq&2Q@}KQS|?g1G@*@xHt6Xb(<(sqNgh8>KU(sSrxPWS%YL;tONCFiJ~9Vkw!#0HCmY7UH_h z8hc!)Y+DKHcYf6exzgEMuD!MdH(V`{!EBXk3QahA^S2Nw6j;$8Kbr-`?>hmQzv)zc7e1ow4O?w!B*sf-d8L*aaz#O0BC|pq!lTLJ0gNIyHlzWeD~Ymk zYwhThd9lW4g;-v5or;+HjL&q=ub%( z-8a?syioDeRgcErN&^6%_5#flk(%CDKH$+X!sqB1Ke2*rcK!hngjTg2M`d%ao}Pp+ zZ+AIwkdOjgslpj4Y~NZH1I%9=D{`6%s0oM^rqc9n2G%ATjT^S;@nJ76TOW+$t8;$8Zn8fXU?OyP`S2Djz`ABCejU^H&BNO0$M{(7$pATFH zO=VKTqJ-(ql}tSJeZcShM5Vr|J5DxOb}C3ofzZ#&{97}FyHz%ol8tSzYs?uBA45F& zvHqKY1^3r3NA5mlS2LTFYjEjCz$o)-7oNgP-3UbZx;iz7F${bA@FZpQ7Vf%9f*+3K 
zyrwnD&pO-2VfD9sqHG9v=d9diaU?&pfkxeJz`6O(RZ!8e;E0Ok%3~Qz*RlM!VN!H( z#L@(&QM*A$X!FT{*hW}YzzhY5=tJu!6VE35038NOZmT+tnfPMx*#AZSX|UX7`5~~g`GZ~I11t|mV6|a4R1Lbl1PeQ7G!)Y9&tO$RjjeOgjb~DYEomgp)aHe z<;@Bko~*ol^Hto%nf1hmSUAW*)Ub!MRjm~=Y3QOyf=&*jaH4km5{)@wenI= z+2g2=wA2m?_D>EKWPg;IljjhMJOHDz4*<#pjZGrjc+7DBkx;ZRIJ%L_EU3ni)MWac zbx*GpaShyO|jxk8@nu| zCMH}*?^>M!GcO_cHsR}bW@fAj_FH)_YO}_;F8DYnS<`ZC+uS>+0IuuMWj2 zp^RvuKtJMpg_r;<3Prc@1GbP$CyYnlFxWO|g?r@%{3 z!`13_<)Qt43wlwHLap5`Q`{Y@vB?hxkS`Tl>Cb8n_i;YvRtoyUyLqTJ7jPz1yo1)! z8Z&GMgp%bCr4pThBmnnga|J8Y3lCzl-Q4D2zf1wj6bxN|Pgc!Pe0I;Z%jxe^9eP34 z&i8gY>;QU{Oyu!rN+Dvg8ST+;x1J5mh0rmXGAoA%``k;wtSvH^OyE~FJy-YKl4Izg=%uJa!qD|!oGN5 zlAxK!)9O+sL|~S|szeW@>LV^W8`xgdRpqRF3Hbv!Yk&F&(1`deEf{$D&d$z=KGkXC zg)i7iQoj;&$Y4V_=kI{DV<8^NqI7APE%dgQC=-)-o&{_2Ryltv5bsQS^|yc~Uwhxx zwfgq|VjI4EAqWsarb&2gxb1X_FH>kJ*G+jvA)XdF1aJt8&3#{vo8;y-3)EX{RQNTj zgdVnivyGvWXp2iz_6Qo<0EbSac=u)57z!k`EVoeqr}+zGsBJ@S9eP zXbDM5&?bqHD%dNO9;y5uj_P)ufwcxi%8d^yI2#6BD_1eI?ONWkd%51=E0V#%g5h<5 z$4nkO+#nN{jo~g+(VvvfPVW#ev;9Z@U4 z{{v9*uj>0rQjwpo_v~Y}RQ@w?B%8g(0$x;+!_0s7wzpuX$%5^m zf^b9W($IX9(i3Xv_H|c$u2MoL*&!<6^tP*97J9KlQwhmajNmUe?<4h9@h)Et=J^f2 zm@yf&b!BoSU@NH#JT|vwNTwVuop4qKrq|`aaJo9d+CF@o{f2RrLVb8*sGbAr;*BIB zO8#!Q>kryl>eoIt(o7p7loD@Kk~Ci_lVP!BowPikflLg`=b(knE4;3wci2d+KtO8c?QnsOP&*gyePt~=8uG!B?5<@a5anR^IZ{k$onQ$@0u(JWVLfjPmPy0?=XAfS9A|g2O$jUU^7k6Y!<)*ZFrw z7R{ge&muUR+2Zw+7|oyMHg52jAGly2IedTYk$1kY5_)AVCDu>kMv qc}usJvBQm zUCaK&i+Ri3LEJSVs9GhZsWV!I9-y33$^G7D+pzihnt{xQL7(1AvmI>gTbA;WK|!+s z%wMg2Z!8BwWFnOjMX1i ztcZ;qqXH-)5d#bww!;HiG_aqtyAxNQ2}sI|9e6vRN&+kRRPC3u`*rC3VxW4t?m$>l zv&=CpsUmhNIzzwF%f?XhW~5+dcvpyPJJnQjah7l7yg{A`iap4MfMi9P!)V%ixT!u7 zI&j`jjLRk8G_SWw{Wh?Y!?C}wTg|C|-78NSSx}D4HkA?2j@pq%?c5-lDPE~7YI|9) zby{tsSU%iBr->S9elR>7R=qSWY#~fn+rb%ofnXwTNLNiL=@Qej^ijz!KfNg9NiNG_ z??=K)9JG0&5LXE~mF5`ECIa$0OQtveFPEYSMYMEaak``QDOB|OXTS8rv8l5C66 z4p|9Q?6y-i^qsq>X!ssOzZ$<}IoHLuD)=^lunrNPEIER%&<=D6yY6A^q-bsRgjN=> z-{-|2HeHApq{m%|7c8`zUZ^QUmbQO*_3r2oicd7hKS^sBwiy{nXp2s4635Bv)_t^; 
z&R$9BA*aDpFdZ?j$=7-<*(x+RynJOuPK0N9`Q5sEql5!}8Z*AY7!cN{*{v==e93oi zV_`pRj5SGzUcEXOFU3N8eHQd`QXoUd#R+;B;PWl2(a}G@?PFi&R%*OFNBYyX6MsPi zra1tHIWEZKkPV45!$1AFX%=?G5`W*JTU5a!Cenp+USZM3SRg1~QA2jB{1YiJjI>4C z^lEK1c*o)H%EP^6u%z7_*|O<07bBbLyr$jX1gY0+Tbls;QtR~TjQHO;A^W~E-szx) zAMu(SJ+3;?Xntfq{9_s8DY#KmbgOUGz_vZ%-jddH;}C#YbyN&XejIri4a$Ux>GEq} z?bUjJ8hV4F3<;yn_wdwdc=J@C1jqQ4rYT$}ooEVXQ9>C+8JWvp>r>4I0Tyf|k`u(& z!aGL}(mS0CO5F_s`2W53iZPC3ken9PE`T))Q zO3T~Z_Y!?ng}C|(XuEE8sMOip?faMUsC^F>*eGioKptM|*;5(hdseo1WG(`^;05F3-v^xuom9t%QMc1TAX7Ur~C1PP7@fb<&N)u-B zc!qcwbx5at zigILq|4n%tzq%q&Di++WU1v~N2AD+Bvxhm~_S;JxAX(T8p3bd!_Kdd^BBVV6&t(N^ zI4m|P-ro9JV(7{gkBuJ#kU2x?Ru+$?TX@6fsH9Hha~nO%xhg>wWV3y`tdfH=?qV70 zsg^&>2`Uu7TL)KeCFFrN?#>%~e7LINZlr^ELpthq%Z0q6k1W!Cf$UpBr~u?A#qlIz z-rRNc2j-xWB3Tq-n9y$)4gy3rU$T3DRD1h>7~i{ayq@54rymtv9pj0QHuYYszC}ak zb5Lqr08rr6Mlf7dNVLNK%lKZYNsv!NoSZHGezW2Ss?*eo+vDS`ve(}W35pJE z8+_sl?rlnmWw&?6NPhMC9MuIw76%TXLMSOVnl41+LPVjfGw7``?C;|z!3q(*i~Sv( zG8WOU*^O}*0vo$Vu7oMeqCRMwL`Hm5;dM2JZoZT&Db-?mF=uORZV1_e46)3kyzp7G z|AW2vj*4R2_JzC2IfF!jCN)hEkc=pyNlk;KCg&U*Xp#yjIcJ&-3P{d5BS?}gIW>r6 zBq^vU#;^Cj_dDBj-ahA^``#OOj5o&S4+b@=sH!z<)mk;@`UU^PV%Zo-BaBd*DCD76 z;yq0E1k37P3QuWrZH3Z!Ncg*?Sj7f8;IfWUl)`q9-ERv9#?>s)KI-PyX3ux|$$l4M z6Pa9^XXJGjZvK*O8BLqzel!ws)*Jj%!x|U|p!jDNe35l!Vud~auW+uSaJyhshlndjA%bN)ZbK9^{1b&P3pJwtH{;r^OwR9} ztvKQG^4`SCTanK@AF64o7{C-bKh8+3t8K?KNA5A6 z2i;N08mh7F_6~EdX`m;P;0mubT^YCsKp6{XGh!k{YujjiFLyOrC1$c;YP4Ec?Gjq~ zT#^UBt+<+(bq^*CO>-&0+q)ARQfbv^5@}&>qT&nYj<_O zZOHnawj;-S6H?PclKju;l?9l)PW5$F)vyxqV!Tv~qBKNkXctjPOb`N(m&M{TK-j(W z%)VrVm)|%KDI`5tNF8Bi-Jx^*Ry0dzTMOXJsjkl`855IR>%!EM_UOy00I{Ov8rc3y z)3OSc8Q-@3)+o&|zjQ~`xo&C*>Z7WKcAOlc;dm&zS=D$Aj6^6=>R7Z0AgkO$0NH^X> zqh9=gd97N#D<~oNY#|EikZ~`mhmSxy-8+MK$3r@4`$(1u>vnis&^=8HV`qloPJuwx zBcj~($_!?8@%jt#PH>8w#?ZLipmMN3qk>CLOD(E9aO!58Z`oZNjf=a{&8P4u1*d@$ z3Fh`@ZjC8Tw*5r8P>vBP(Kf2yTqG^gn^Wq#%nFC4Co`(?frfObc%Q zn!py5e(A@suVdoBok&<@ykEwqyFBl%u78~PPW0`e1L93O=*OE?qf-H+X;)7hOI;@B zk#-dQGb8-?&TUh8qd*vtp5ROuQW-jq!$Gj-wE$34TyAj-Mi_CwJ!-_7#tnWA{4PVX 
zBOCt@!~@7;Rn^Xqme23M-RbFD@EFHg{EZn_0LK!vqgGFuKFP`Wnc{!m;srj{t(5+8fCP@-3~;mvTk;8n*~Sn7LLz}V{|f@W#Xs@A{Vvfk zZ;ikR#cr~SkA&_zH0%a7Xc$1t%cnj9W(5pW3wMPu-qxwz1C$DA1XwXxgfh@(ZEO;I zjO6GQQu6H5IEGYs_@~EI##sy&9x@-vk^{R-@3r_Zel@TXMWts>>q9DPQu}vxqR3&k z*>ZTl{m9`F^j6^iL$}1zHq!z=8V^3&`ri8^KN9$lIC#@mg!!YTC)XcEf6Drj*r{OJ zf>ql8=Ry3>Jrx*Afs4d32KnBU(;lJ3`PeQ{F1&&3B;h+B4{{4sl?_o+VlCAn@mkl* z@5ikGCa^v2J!XeI+&?#ZWuBa>!%z#U`ej?cCaCgj-muku)NK{)9aL5SvaQ{CQGVu0 zAG|kuMUMsTSJnNpE&F2pNoNn@Qx?~*0e&^Cs_U2SUrztm<^KQR{(UeP5!g@v67$m^ z95;UgT>IyLiMmM#a`u_Wr>xVdlQnDqExQ99CzBi-V1c4x!vl4xK8pu zn7(zi zSu!SO_)R^>*D?Dcav+jIbGw+E3MYGrc{$_OiS3OhkH!G2=G5e;J080yqk2=)S~3jL zIx=hP%MV`+4&jqj#nCHsL&t;l*^L8{XneZ9(w_hr=irCq84Jm~SHY@>+HND{kK}#= zTzo9Lj(-B)%1O*G&$YH8t%l}r)z?dRBosM6d+=qv&5pUvLvj^ibnlhYz2kOH5^q{a zjsg4y2Ozr;6%zu8^pyuw7&kn1oZH=bxK}Cg<07>)p6$N4^Vaat2OaxxCr=tAu@wG2 zj!KeMQT(B>wqg$oENVqDZFIh*Nx0m&nsT>!U2eBg5_@uXXpue&!)uXy#8UJ89m z4&j@))%~6ugXq|YsVEodmsyRw_R$TboQuocv&L`X*zashk zJV<-UnCIc5<6iy;ch=8jnS&4@(0Hz0h5;)lIH)ly=Yo%K)~E*}>+%&NQ%_V@>Bg!e#;l@SV&BKbuF{L!~nAMu-Xe4}r=Ay8*&8$jOF@tB`ec<(6W1FIxg zbPP^Nz0@7x!2T@V<&hE*uMHS*iM6~3Imr2GkMOY5XV6+MruEupOG(tGI`CyEXt92H zNyX%ss;E!3t@>h%UKtCxl1fB|<9Le|YS`3b9uDz^_m8tIC)nF;UTofo( zxNzaEnmt`i!Na?!!VegKHyDM$Q$C~zQ&_e=ag-56xc5YLqUb(@FYUs90a94JT0O7kw;J9j})yp zu=r?PGL(eqs4@{9Ej0&v_!R*an`|ALkl0*CKv?T#BVywdEw0Fyl~yZLzVx2WA>PyZ z2t_qMm{7G!dM0fLEpM?CH4PPUOXV4ElE-Q;Y60H58lBts!1?2iIaB;xLJC=p>NjKh za?a3v;72Wn^8-v-E}9;RFRGBI*1@kJkS!x;xzt=3mDi2>K2oN{DE0D22cM6Cv<<`7 z%@m^pkBoJ+2^NACbMhJ|Vx4OkuzoEZZsOkT8$V=!_8uG-sZ&_HBV36GlMP{vM)8RF z`=k3gI9E70^}gUH89N4|v?upyq{DfY#?}eC?h0-ea?d=tlCHC%i7!8$hilt~+3$GB zz&M9E2%RsD_AU3lPHSI~$BUJ|;IpZ|zw`O?o26xe9DI`QG3Z=|YeZZexH#feQfHAl<$#4pTwi|Gl!=>mQC zoftl4qky)jL@jh4IfBW8hll*Qn8oO|3A}pJhi2p=QalIM z4RO3#AHs?3NCry%PQgUNWMQoyL3f-nnOaM8`w9R6+&CaQ^ttO@M(c9 z{CF^{DT@Be#ot6H{?#`&RtDqmbus?5JoR6FNaG{M)SA;7c=;{`J8U0)XjZ@T6`PkE z@!X!)x;Oe{rFCciC*TdV2wP#RHT`c}`;q)zxeUu|`bRB`-_BT4@^*4i;Gv~gX3NJs zrfVLs_5_7AhU4-&`c2lU*(3*|?=(Mrp3(t`SMVujsq5crUxZ$LOb%lBH08!{?F(%D 
zC*W1tlS5GiS3T;jZ({-40et%K{YZPSnbMqY{OUXL4Y_f`gC+TQW}6b_zx=O{slfPo zLgNlIY=YyBf#rniAg|#be*&qzf4V_}TmrCx&N$3wSqDrMLUf4R+-Wgaoh)fzpZV4{ zYxKjLS2}lYH@*#fbyX`HfTny|6ZpFQnVV&dMP_vtf3pW_7}E99FhL6>4}T#y6k}Jl zPJ&xer9`zaio3)Qv!Mb2E2f_3H5Sy0(zD3iXdo)cPg;nR~XzzGThR4#9w{zP26nks@7z>;e=wA~k{Vjx*7N z!aMZh0_~JiN8;Thw^G0Ud23AxWa=63jL}HvuuOBb-}0uS6}c4#XnLVj#j2+v|4lhR zLeqPqeUX3k_UybEE+?nVLv!PgZmzb%O0S%!6%6kQMH1FpK=!yZ6?9FMop~J_h!GG+ zN~6Wo;x3EFdsB&!ckR#AS3kT3vp+HZ^xQLOso;+`*FI0s3&|b;0GMNv8z53F>5VNz znq!9vksh}bl8kF#$$JQH7_d~?;GpBc`myRZv~&PG1t|2kX%?!x_I&@lx>5Jw`W4sB z^7{(kzPxxw8o1x`OCxQ0(#{9oId;yBGOVTSdQXvqGz{%fDN1O{ZqTxqdcgn7=BE z*t)!chX46R9W!mgbNqw8={l*1h8`|a;~sAXGJJaJ2WKk;qA=chEhw(#+ILmh@KmF= zHnT?crE6@c6uBX!k`-pdA3~-}#YTpMi@^?^CzUgG-)^lkJM>-Sw)Xw=@J$)aeJg83 z`y07Mt=xfG1|wLCPnb1Txm6*8mGcl+`oWS6T^+hfUJV>bkutJmVHX!#7vTIhSrXR! zil_5iJno;j0qG0^;D9&)Y{guLGsyqEiP8Cgaub2KvYcKHOlq#^3wk0rVP0P2uTos! zcpC$tRFvZxVqgzE0G$l3Rf6(Pt)%R9+$EeGeMT;dfo&tM zwV}8Se3kGWxD>%zD2Vas9eT6nE0JxwniLmQewJj503sJ%ta0?Kbgy-8EP9cgl3dkSFSwTb;WX-8VT8-7vxPr|OH!Wq1Kwdc^7{}1*?#^ej>Q#-i(s8dF z;dtt5jhr`XEH{&tJ2##_yCYH`5bJ(-_n)D4yfWS>E7OiiT&IrdHMo5WkxCmGrS(0 zYIX}rL9<=x%};?M*4BcGOZJ9W;A zw#lm!Zkruj$G74GjI*Y~TD5_r8{CAqM^`Ri5}kK@PWaQ7Mys-Mfy0sh@&&QpWUP0m zDW{fqC^K`}Fch8(E&&PmZz%Kqd3*1}t(JJY#Ro7QH;fs_?&|`qpYNuDI|C{XS-F#} z#d4FKs&Fw@&qo=k7}ceE0E|2Xn^bSO{fHF-RCZUJSBZFp;a6WMhqWk9qyKZTKmDb{ z{?g6{GY3}3%U~AW$0}{t@=#k7P&>UWq!UbdGk^_LLimdBu1z}{SMQX~cg79E@V&84 zLxWM%|9u!bHCiN14JRpM*;<_C%RYBfjvTRcU`*v^ps2c86E_02d7fY%+7-nezFp-c;`n7)lb0bZw2X0Ihot5 z&>uthV}AnXDhIK;ZP*fTXFKU@GRj97BmC9iEjg0i{QHkAhkW>Mcr@QTwJk-Bac8qR z=!;I`!6P{Rt`DSOQ7{03lB%lR`R{Dl<@cCtn_sK+LV{>J1s|SDS3hsu48I!|z>~aD zPJd5Q?w#vmDG>3`cDh-ipY9_k4{VLWR@a1jY0l<5_t_Z8KS{_%lRsAC4#~%;>F`@h zpIqJjVECwz{mWuTkj75{GUUg%e{waqt3$#ctqxW+k1*wb9sj>Rjlbgad;dvYl{TZW zlfmC>9N}#iQkzcQKkkeObsa|NtrKizAEp%Bp7E@s4D+s^RB|3kJXUR=|47r?7s?298o57$kzMYuL$R5WbH5OKl(go zbZcv_y9hLOTv>>m_n$dA0RsXd-d37eMuF1por8L-W6f8P_LrAh!zu0rmy2~<7^m`% z*K9fSDtcP}F;W@sO=q<9e!L&%;70%=7F{raZh%9#iQjBoMx)H`TG~<4!?;EieIwHL 
z!-J?B`F5I#YuYIZsKuKTVz+#jN>ViwuqZH{Gd+YJ1oh-z9cQ0X><9nF@bps?O`Oy8 zp?EC;0;!`k>QnA)CdYDgmX?25YNm1QlLs8ZYF-{3`Q$|{oL&AIj~(njHP6h;zWa(f zLWzBQauLMJ*3IfG%*1fCW`kfeDE5#~*MXCXR!0dM(;W zSt!n0kZwubE0bA69&`Knju(12)=0?G9T%Gj1g3@K8OI7b*UP|#USjc--x?Q`K9|xb z7`{F&`IHglM*0jh9Uxd?B1R)(WCu>;6P}${P>IFRKSu~eEE>l@%(d0kkN^ZwbobiC111eScocmWb10Md5Jb+zM`;Vsb#3zFt%}Q!m6z?gNx#jZMZ~x4zm3Kd}L|Ufc(k8x%Par2T13p%6bkhjz$gcc^Q_}_56D(^RCjKKNPRX`8xAJ|E z>dEL*Tnz_Xx3iPLd-+JQGBH>IPK6J_9H8=?`|&yNv$~_hRx#HOHQ|BAVIN^fvAFlW zY*ZWHtgj2!Jsn--l9f%0N%7PbHCU&u;(#QN$Sy12iA%_U;G+{=-!a$al>i5LbZCsj zE9rz^vz3D{L&IALw`;2E91y2`vX|~)G7RT~w53**L$!NPyQg72y3Is&yl!ElN<)F5 z5JxBzag#0ynAZr9P9`t!prUwf%QRf zj!CqaZ!hF7eR@!0y|bY)gj00=qvMe5$#Wf?>H!^PRA7H8t_POr%6wev4mpqHUNwT0 zB;rkV%kbS{5gz!F6Kg-DV6Gy896jBPI`uV};P8vD@PDYGU_Wn3*6=Iu-9a>*VCC0P#vq2k8r6{)4F*g_;C+oRv7Pci}RCvDnmN(6gi3XV$b zu3`p@yl#r8mlHVVqYI zFL%z5CNRUr4XH^*NnPajPuL76K$PRvTG5(|I=yjp<>a^)!*%DSMygRa&K^g+q51mx zONO>UYLm#)%~tQk#bBt)nAy2dE@ay9wR?5hYdrGG0d4F1Y7=p$#4Em~f8My=TrUFv*t%$j@h z0bI*3Smgokv5U$|+#}Dr=v}sN=0qBKIkj(g>>pPTD9M0sG?3UaQR9zu$c91(unP77 z@Uo1xxlypyoZ(PGuaiVXLi>&GHLbnZc|+32IQzacwV@nT#&>%(yLDC@#fTWSj-qFn z`y<+4dz3jQwR1A6h|7+5ijn0};|(Jy_;eNd0l3~tc8nvRhoI0v)ru2ZH}akh&1(Pj z&p{Pl1(jpZ+~*ByBe~U=No@zmUPs~b=72pws@QZ{W2D(0v$HsgMmg?1NKj6`?nxPev;(h}i_owyLbnPD161$E}^ zeh>f`WAq(zRhpW*qAz>V^c8E^$QN;=vij9v&v}FZR!4C+Xvi)*#w%9CK5|Qc)*w9% zehMvHLGiB;u36KZeGIX_B2AA{Pin4-nV@*_I9MLvF0oB*P@d|0r=K)q5|%BZOURKU}k=+A1>Qhk(Ua|2u}l-dbR>M!w{_0Y2;3b7Rm z3NP@bW3!8SPso+ELV^S3#4?Qhkf|>b<&fw6I+tpo%Q?iEt&jq*kCEJ0!nICLd#T4bP57*>xSWZ*lQp#^U3|R+ z5qlK}qyqv({n*IMis~J18bm4C$(ii6p!8YIvaD>5akkoPK3HjPw3|&_o&Z!uF5KxK z=9Vi=MQKT}l-bAlQp>b4DRANuStUTOxnG!0Rje`ib>2xzm&-Hc%!gDnYsAIY5L--c z=D&HUuLCzJEBYV`lcmyVA>#pm`Ut5zz1G`w^q!}aD}kqR$tPy`UOKy~^;M3Oq{GS5 zS&B)`B+NMj&witNDt6dknWQ*%PQ;l-TpEy#M+RI;rPvr%5qtQ9|K1C2v$Cd1b8yb0 zYf&7IeUg2G{iJP7EL7c&4oavw%s*&x=oEGE*kuuV4GA zblbzyY<&O#;q$rhmh{Sz+B)|?Y6Im*6Ka7k%D3uC;S^8 zU1IabI?di}CA=pqKD=0>Dm*;o_aqQdRh@qKgWruG99ECcDq+|ah`&$de)9&P|4kXO 
z@!%8A@8aVBxrnmz{%h!eC?);|O8grz=)cw*x6*!{@8`8nW2C%LCwW0jQr5krW`Sya zBUnkTMU$`Rz8f3bpn&PtOo}u&$Qel|0DSwbtUM&$2sK zOP*N5YT({&ijL)vc{?xOzq&H?;4gSAn6NIQ;_%LIq_1Z4`klw0#)H_r>yM?d?v%Y3 zHEa5Ew~VI-@eHj9&E39Co$p+|4Jv=W%>1J{=-!XOO~_Q~&F101PViR@{>S=(l8~T; z&8if@LL^380Bq={mU>?>Fdn4*hr z@Ps*K!4CWrpc;UckHLBL!~6qbX?mLID|oV{8LL&>-qLW@`bL0ATFh+?9{LMzQMS%vv z!If&FB6+AF4P34oQ{(vFWBL+P_HoS>veB3UhE&qhghg@U`H9eBtJk;+uPh#l)a34- zBz(OtH`XD=y+>NN*SxhS7;8(KV^+IMoqo?d&Z651wBf5Kkd8Kg3kOISXND2r#o!+^ z1C8%GZrQQ5*zgFCJ$vT3yx@=E3&0xK?!INSQn2@G?r2l>n? zJbjy4?v*iYrzer6U`DjsZ{al1zDrt=kTs(6bQO{PLC1BIa&i*X2mRM?-Is-1@_?TW?A2MR5AsVJ3ucxTeJL+@;uctS+02wBCH?l{3>hdCb{QOz9bufh~0s{Knt3HfQ z=@`UC^Sy6DXb^RleT&4nAgv2;geA` zrR2rs_9BN~7oG$fIuKP{eM-=|6&{^7BzC}XL^VR5FsjeXyH#0N&OcN-*X4H4zCFL# zO1Y;OWLuWEIpn^?e|pTJvW^MKeUQqCx5m^}Spbv`d1nBGteqKzD8d0Ju-_+JAOdB5 zGALT|W2k29&KHLQtSX-CZ*%wXYD_BMr7#Qn$@iO{;r~(4t*X%Wzl)4$Byy*$JJ4-&%fSiJwn9ACwC`{mFJdoxKs zZmTp&q+9@{Sa&6F^Ta?|&48;IfivVIKCO&|irNd1SwEeaT?LCZokH(1jj1IsfsD)j zG1b&bGXRs5_A&%qhQk5im5BfpRfPB_w{`BT%4~c}YxrIaPfd|+7D#sDu_r9CP`;DJ zFfmBoRM*+TQAPS_gms?=M{NNb2GC~=H@zsw8XsS3wFw_zqC`IgdGr+IkeHMcmxE@G z2<6Oj->!VV7gB`KRjRO$$+=r)cTB|AZYr(0Kh|01aEd{mhi7E8FM#w;j7LG;G4HeC z_yQ55HeyPkoPNM-969m}a)XI4Rv(I4?`Zep&)fe5WGx^g_Is?C-}hZ+Nnq;we!OqC zkPj9xbKLu;b#wg7`}mk~hWLZ}nrcVUYR7qx3`9WUE?-hNimIGitTF>Si2@ME5rSi| zYUXT|u)TN^Ne^LdsWa2GzPJ1=*^SHTs!6QocU1_~CH>ENU#EK+g^{Gxrc;@#3%K&m zyw=5uU=mD})dU^+#Wi_Md9^BIDVrUe3C*#5)MDc@Yj2di$-JEpnd37k)bTtUZgrA0N9Hr1v(;N}% z4UZ2`t4U@@rnzxEKT@%1E~}bQZz9xsG>!l#{3`T- z<4$Ud$XKW76DjhDwbkblM$kM3lC(-RUO&L53o}57 zRTh%^*j5x7e#>IB#-&xnd0xc6^SsmJg}w|(H!(Y?G63-M@qK?M-Et^xnB5q9&!Cd= zp0yFHj#voyqOvz0oNxwWM6w|xcHkXXQ&GuMd_Wv6xi`T@@^NbW2)9&E<&cZ9f~^T6 zllIW@8$J={JsrNb1Bnk~7>iuf$IcshrXNS)D2IWxl>8@KZy9GJ^G&LirYPZHC#76M z9I(e}j6Sb5-84QW7p>LJzbCZYQ1% zrUk*Tpz9;2qRcvAKdv($fsaohSvnOg@oarEo%Ffn!{Al@#O#GDa9>-;KB9e_P~to2?X9JSMX6Qn#!t;ep|c6|Pqf$~YV9 zMrr6EiNTNz30ijJ3C|5Z(h+^}=i9d`B}BRMcSdgaKk=Q~7;nu@_diRwFG|zp8X%Gb z?NKd60dWYzxG-%YM_-IMcJI3QE3%g zS-3qqS%8e+hf*Vzk-}a#*TGXYJKBo 
z_PQA9&Mm+xHC6%>s@mYQH%Q8!F7!F!Ny=VLHL)8?YYs)`locfqnbp^r^sbOHkz4HNl;ldd!Ql_?s%d9Jl<}zi~q0-T;v}dob&O1%6JOf12@m4)en>44tha;oco|~OstfGR$GD=RVJpm7m2b3VHgcU2@ZW{2C0Is0_ zBTxQ!GU}=u>>I3wp4AkX-Y=)g#vh|N^Xny!+!pV#`l{DcK7wWWZ+t<{NX%Q{ZrA>J zr6a$+vxz7F-u0^b?w7$L_p&Xy;A*??CO`BY0$KuHE`&@Gh;}sPy^c@MDTCJX?kjx4 zy3SF3nyM=BIlSY!_hgS*zK$KVSc)+F+pfw#j{m!ZoSar>CH0nZX0_R>+|nHGzc7Ej zCqnI~o*P{pIP=8)TW_0^hID)og0P@Lz26C{t}hC$3NxG3 zr4n(qkj&GmC;wX<7Qe}wd{<(U5M#I{N`|9Pw-EgfaXKIeN*h`Z{}|VsQ-8au zzkfJPLB($c(~}xbhaYWcGj@(mc;k#c7E_4Ju4gK-zF9I)H>po=Nd&f~h*}s1ta-R) z`~l$99Lepl&vpm7NE^bN150zOLg1-;Q^UiL zyaBl7c=?6c5ZJ@-ama_^+V#G74Ydin7KD1#`q-qS>)?gAu4kzQTrkDtZUV1`4;H~K zbXCGZEk@W7n<7CCr1pDR_`NUl5!(5(BPc2T-{`_zF=7WoNS!4kK}qR^4$DpSD0Ri8 zssBXFy0ogqSC{D2_@;YRTpeF{(*gJj)c~ZqlN<@O)_Uc2)Hd;7)9i^S{8l&M&XQ0*TNlYUFLI>LmhMkI?2nna=Z7T_ zeH~+E1&pQqrYXPRvi0-=_H8oyZb)mIKA1{~Z!0w$;m{!Un_nh*o8)--LM-B8>?4`k zH~?0!A=)`+`O_cZrPf8#f|l)4`o>RB9|v?ZYK+|V9O~vbc=G1}#J13TTl;Zu+E~qV z{Pikt|AB{`|Kp4;GdEao&CI(?cZw}_d$dVqRNr)lLAu}vT<**7X`E&f#yh4xHdu6SL?cIr{42=(e3V1F1t>sa9Hp63C4v_R; zpuWFA&$MvP2dBs-2jkO)#}Ryg4ui|Z--*s;_K3d5Yk{u1`%Bb*Ii)!13rcZu423ZK zpFaubYf0CgUH;t%@qaBmPdjl&Vn;8l^OU>?TbQ~ms>2J4#t{mm(ptYIX4W&>sS1I& zIoZUo&(uEnQ()Qu+p%^sH#C=@=@RCCtX*tR#0^%v`z#q8lJu<6OfZ}AVLBkuQ9(hp zZ6(1Mb)m@(A<26(CKFWmv>Yp$=;iZ7`(?ZZZo9%MbLIbFXcX->nV_pGk?Db&Yjf`Q zueta1^&sSf__~PdDE?5W!MzUFGJ!#gTsj4HwmwEgpZQWIsworNo;Tp#l4KBo6)x`uW%;lfod+@3YuK3vb3n2KLHzzk1kvcF8gvlH(Rk}aM!)hOHyKOB&JHPgW{u3EuG0HE2WaA z4z4p&6zj9%l~PmHvup~jT#|5G`da|`c}#|Gz3x!^32*`*v`Vm^$=u^V`8YI)X>S22 zbso+2UpdYXNKr`m5x8`!od{hmUBvge8VU5cP2D>uWhBF2yzbJ74**_gjs$1pMaf(m zA?3%j40?wH@@S>$Ha6Qz`{y`!(}7;i;17EElQxhMXcaI`a*iYn_vpd%%C@hNE#K+B zud&0o^y;@>y!s}Yvj@wI|8^zC!sk-{urkG?7z0UK*pD*l5RpEkai)-ZHxvNKdjCvh zey4tYN&SWYI<8#ZcAv*+hX#56w8lv>k%V_ockH0SJ`THgpt2&Ha*II0o;QK<10fV$ zko~#b@ zCwJ$&kPGzTFq0%bwshX|%kedVa|@}&vT}z_Q-p;G;D+2G?o7ZTij}vGI`sq8*O8(w z+qu0ZHaxeLWVII)gW0$|FgHwijbEWwX;Nw@4^Pu%`E%F`!gOa8V0Wz_>}BN;2jwM~ zdYlOO-&HdeLzYYt7BcNrpDH5WAaNv3L 
zgjxS7x$pd8okepE*%J27J|PC3dX*%vqjs6_m~!cR?0lc(*m1upqNeh#)aJ-prhhq1 zZ-9%V`Y|H^lUZ*;i{k-zMH>PODfuB+dHz{WqZM{-;kLoZHIlp6LdF}FLiv#~&PryG%a`)8}i#8lwH z57sy=IY%uX-UI+*y!U0>g~yV#8T^I<+0=Z{=uFziFK{-gL0KqSO#>NltzbJidI=Zg)-Ao1xR}whDcmPpk1* zs$N;8f*)t31oo9%qnJlO6LuXXU{6ma!nB6+YS)xXt!`OMFas~2g2Cs8bGa;Y9B$e>sF7ovCW&o9DIkMY#zO8+6 z+&}aBXj_|Yr(VXu()W2bFZ;%4%$9RB6`nEHZ)`6cHr>H93qM^QmQCgzQU-- z4fsC0+=i~Hdv^jhZ+Hby0X;VitK|^H;giZ0f(S(?wQG)Ad{GAaRK{UC{LnAo97sZK zerPcniVo+|`)oT`NM?Uxf4PPdD3uJ=xGxA#~FWI2k|-{bZ?#RI<7K(1!<0Q%p8HBp$I78 z0C1g&j6gahhE5LZdsq1C_6^P`!rm5>yCk% z<#^yCVc1&htDocp9ri!P=KdSxKE`{=fh*5qkmfG);jLC|5hl;7cKkO0l>W{%&66-rh}b6G;Z_iiM@7bpTaOPR^iVMB>dHiFvN# z^N13hG8-TjM_4Xp748~+;dl@FW~0pwj5Zf*<1?%$)?&(3WdD7`m|Te=Q5vl^1tS7T zZWVuyQmildoV;W0%hOH&!@PKZ{kGQP3>K|nccehDzpmuv+p>YQmdrJ-2EoTV9Rr`c z%1@%2r?wAXBl$9CRL8oI;gy8ssVWEq=M*p>f&;Smu?4t5Ua0`U^FGnmlOE*R3yh~) z6{Fy>uk|PH3+4zVaz0;gOcUd;AzV};LdCEsVbx%Sw?yGO`>irEjxuDGe031pvGkK8 z3@1;(W3B9}1UW1jFpI35zSRJRqUw5drGl@(;mq4mNSX=iG=cBAtW)xN8lMSq3uQR0 z-&GDHCs{qt;(JhR?+d}<1(mW%zNMHIRc>o)J^Y%MP=KDlPp>H~$J5;B;eYa~@>Q&U zmdpI~MyvIr)64vljb`7;stJe}CoQM4KI6#seyrCvHTHMKg0S&$7HB#T(+OEbBjWM) zEU3{F4wKWh0 z4s$&y1+(j7#bz%S+#Z9j_|_}2VC-+w5N1q5TN7SnTbvj(8{4rwWY@v~;gxxjPq)G! 
z+bXI+2D~b=3|@Ql8#v&rm={vla`)Io_O!U%QEp=V?)P#{(eafo?FGN!k=;C#jh3P9 zyh=1LOT{oQAcR=EgH<0Kl3mEbOPO%n^hY)NJd7diK7LTLTe%xVPCxI~pjWc=5zh=O z@n^d=9QK0qkVurBrM3)#>0lo>GRW_NTvkWv>9TW!-a&{BD0K(w-|K8B1nZCE>xkrM z5D6QGc1Lo80bf*X$Ue^zvRizTyk&!uX?nXFJ6vmXn0cdQXQNq`NY>vTFM%#aLpeuL zgb~osA=mC!?{nDwc=PLoUHNv>=3?E;F!V5lcm;D>_lB?B&W?Vc2--IC$8;Z1-Th;3yAikM7x#WB30o&EJMT&AMepxH!xUsX zo)#HWi2{bfQC%T#G(LEYOv%XkE*~SktIMn$0#SB%k5(;0weL2+wOR0Na+HowiH=Pl zWx%K{QxlcQIqd0Znb16WPqrQd`JE*z^w3%_XLrcdiZ<3 zcK>&t-7!2EOx=d~X`NSJ!-#@g-rLU4CtOH-W7d!nj1y~1^ZucD`-58t`07ng-`Xkl z=}|tYZ0H_5hQ>66aQR?Q4%^T|gQG+9+!90${ueOW|Q$x6X$^Gc^R8&3J^O#3tEM+Q6_JTdoxVeTC=4mFW?(T2FPJr~~BF4iRk?THj(kIGt>5i`3qz z!K8j{Yy}XG!&P`}Nw#+u{VC$KThunAwrMucfpO}w3cV(GamYhi&yard}s-vJ9AIarz;<$f!N6a#R^MmiiBUO^_ zTgsud8Y&83)qPlpv4nUaEh7L|x7PlZZ^PQ^d_W@lN-#__xQUWoq-EE*$2ZSUvnMWw z4aJL3*FO%(-s*^?7C!60F5WI9ei4JY6|pbwC3JT0D;b@JT8hxTbIGl7h#XRA<()3X zI-2e<6jt~y248eo5pTdOp<%>*NGCvtGjemh`i=vBSDy5kaZ7@cQ)a%ofppbw#F~Mn zgh&ig%sF;|g?Zg5y&c=J$_K`RUL;g9n8oMn)@m1ByQSz1(}JU|?Bb8O_*NLsR6~&G zJswkxYlz~zM0&zdL-_y4-djht`ThHz!JQ((p@raD+$jM<(IA1~P}~U+tdvTiXmAJ) zEgGESPK!GfhhW96K)=wIdVBM`XJ&ribLX5pb7sz2>&{y9Usl%6UfDa(v!Cq!dB0z8 zTFojLD}Hrgl>{cMgIUE7vQ(?5P05$`96w2fKmH5-*^82(5ex}V|OF)u@;L$!un;r zywdW9<2VgsnZ9E$9UDs-E5(x&c{&dWy)388BIi-W?Oih=Q8n9+c#?HufRHDM+ab87 zc5Yky*E#YPuKiV}+23COca+p8h4g0;oV7Q8wFgUoe@*=8U(p$=?y1J6cU(>v(2Ixa zZ_aLPLBD2XB(y;nljqXiX%0QnLa}pn+`hW6tF`Q*)v^DBM@UpV+(9Z^)kV+Rl}jWM z=eg!Oxmh6<>Cv#@ogNp6k|#{0GgLk!A)q7R0)(~M9{&*78ggaC+XsxSabCpw`E`6XN1=~BGvJFg_vroj$7(>4XHd9P+8 zB^&=)hVughPfBfo5N&BS-Wfi8%c?>+8Vn#UQy@i*hPDoy`(EH!XUco56@;h6esu?) 
zB$Cl=bn0nO7Zboq>@X;k)fFh1tJI#f>%ZEn)Et^Q^uc3kUQ_Yc#dnsU%fFmATvOCW z9`%o8JA#`AZ}fl!J1O~uD$|_MFqV%YKX<_Tdrph5z z@njUdth7((=sts=iX8EOQ$P5ss1mApVka%WJbK|fA;7$mYa&9Ku5DPOf7_H7&&*}5 z%~Crd{l$UNmU-0U%Ph~e<+-`asn?WuH-4jHnsVl#{6Q;_O<<?kWC8*2L z6ujfceCL}>RdlUh--(K1sC2(e{?+rJ>AVT)y?azaWAp?vMC064Vj+|zBYc_fhLy%4 z9h^N+V1>^wxMaAc85na8M^Y`?v7iI-qoe~8kEk`1N6QNhFg;fiCX9TPyAL1>hIRb> z#9=*ow6W)+_ZT@DlgYzN5-OblLSAsm5?d8D*)hR7q44;QQ!GCaPumE1-=V*{|SgNgo0-W?L7hC~C1NUI8kP!d@ z9X}LP}8WWbVm*;L5m7?$m537p*WUz(Mc{)WH(OB50cJ+W^!E3qFT5>YPI@{#W zw@!xDNg0*k8qR@Ml*nT}!Ct*rwcXv~hWQQjQ;l_jc2jvtrM(|?APUb|$r44YuH|%R zQ_4#DIF=0I3dCpel=z^@HcKz@nwXZrwX#aVU*D#zLhe(f^L^VHv9Yx-5Z9WZCBt;AZ_EhELmZ&x=-g?-~hiX^S zxDV0|JCP2M-}E~z4dz1#0l81GbTp7Br69R-*Uc*TJ&ZR(1e>XJ*pC#aDwyVV$IP>EV>)CUS-rQTQ;l!1&el?NEc8&@ zW|gqr9(yL~aW3aGu9q1mMK6v0Jd4HiMy(q@^%P0t7{&ejrQG+?8hZ^mFJ3YGVwJ>J zPCV}(K>K@uiwx3SphsSH;rqe_g3q1>15_aQvzRBddn>MzBz{ORKv;F(4yY%2olP&=3$ynUZ=1&tEMt3PM>6}yRAf7}`Yks!eIO4@Fe#qkBpwEgUZ z8$;hFj;K-1?vfiTFlGmc(Px;{&ta#zPPTl##pg1H6N)AkO1jPNwM)P<0iY1|86{60Tt4Y^nY@`nhMaI(zJg?;m8Q+_yMy zZSaX5a9@xbwan|=9%S2qCyps0ylsrmWDMHrrQ8Fxf@n*u^a)_eUe+w`Z^n_ zW3_kvnbCvym1C7p`Av4 zq@WLG0woBUDA8&GFe7wJc)W=uT@}f4Ug`PI3a>q!kN!TBzf4V%>!e=0Y#0|G-0GxI zJHQG{$j7^uma}r#e+>JIX&lGu!?xZzdiJ^$bWr~_dxptFG(aA{7(ht6G>ctA{m|z& zkrXhY1I)=*(Rw`K+sgB2*?#!p*3?$}RyyCMsh;O)Cz6M|AonvJ070d~GprvXOE0B= zyXu4&*N|CY3iZdm`yi9Vm}wlka1n+HETCw;sKha^`AgL9jSAkb>p?213;|<}Sx1$KAt-h6#7g&4!(~{`mN~RmoR?lPp-KRI$cp zDXbnFSBJQf7Bqa&s+Mpt&&}U13@4nmLNJx>2uP554>aJ`p^Hhv)VtkLl&JJkG>aE8 zK)sc}zK7OCvE7VIeIhW2;co8mNSP>TAS0nC%@qzKVbpM?mzvg&CiA3wX5g@;;*(Kt z1P$*>;&bIlk{#vo)NA{)GY=MW6{@8m~lLDNxGJ;V`g7Sn=@- zw8b<2$me3(GA zL6zp@CIMt0Z-?`hnK3=wE2j7)1&O6&AjRUv(}0_mrq*mkUnd*h+FKM;-tI7bQJ zq$ktHP~bFP>3GKrWze%i@ccl+hz+DX+srGbay$IfC#Dp6+#TvqS~rJFJI-ShZE4^H zC2xC5PICY)H=%t#0mMhtdbCzB&=WVPr&@n(`=3K;vCGO)Qg*k^*5MhEZQ^lfnwVSZ z5m-D?(;&TRKBt6r5tbK6ZHo7c3XlT_LEMA@wYIcB0PR*(pS)A?*Yww-Km zSia&oy&O3?>rN%T3=pyvS>@Gu>y@jD58g~gZedcnr<~PpI_mE+-^O-e>lEH0!P$CW 
zPB`=^xb)&n;DQMF$zJdcM>tqcVty4$+Fatl!G3^AkM8hS6xL%6qTsWUPS*~>71+yK zMN8&MEfK3Z-a^-i(LCR>VZb5wyTn7&Gdt`*OYt*`ej_PN@-F9AFp1;Norv9+P$AZL zWaKcRS+?01Bg)*^i+dPl$*|Gh+8UA|Fz9f{iM4ikdg?fmAzXzl8;=Fpu8iU!VvpfC1 zGFkB`Dc#{LuWfF`8v}F`in=5z&}gs|EhvgaGLot)9;jn9&=7b~65C9Z5Lg}fE-ffs zf^9wUrl0;GTB)Fum{x$6QfizIl5iFPs(p#(HwWwC>JdsB)#@Qghs?fL@ub;II3LlK z+?ABW^kC|Dqg~Q7tpQMSCwp12EbSZ_;e#a73L67n;renFxtxp_gS(j%o!d<1h0(Jb z?YVL>!4}d!-Psy^I3+3Yh_(ys4n-9{U7ga87z_@+cr_erHY!EL$$-1P>MDJbKKxKI z7x7Y6_xriBN(!fDfySIc6>U-0R9zs#{z+UNRqS@l{FA$(TmE-98VOj3CK}=2y^DzC z*<_axc^N!&kfSv>Kr|iCQ^_j(mI)vEGEeK3gWt-Amwr~U0X21A0wMy9{9!UrgDSp3 zHKg40av6}&nGv|?lKAW-rlfzj>Cvw^Iy7+g-K@5qQ9P50$*El|njB|sU9lM33x zn-m+yoy=j%_i2dSTB zVDnxhvVtX6Z=UvoIY!xaUnq@skIt~hk6z4?Ex-_ zoL7lXGcUL`f~H!>o|@#2CpP%)8apSE3A;*q7|6k7^X2u-50ZxmR4HslgA;=1A5_ep zxk`Udh&if+5eQ5SMQ1B0il9h@$r0@NdC&@mpZ}x5@)IKKUwJJ5?zj9u$C3!2_=A4m zC|RCwSWeW+&ky;F|2S;d>}uXRel)2w+!yO;-HL}ql!f`o!TT|FZ1Rk(qpt5-_xu3Q zi%EBvs6dzs0wd(J-;q~uzTCg_Gas&2#Ul6zaB>F^a}Rd<1DL2VTmS9#=IOG}FW9#h zT->f^6aPV4EILB+1kJ`QzJx5nBY-G$P4Qcvc+ze%4^Inw?}inL@(aDH72!a;o>XKM z-zLdskbpBrjtd}1Pdk6!@R08ybJIXhvP*v?50%<^D{yQyLz6#xRX*(@T7si3yjVfH zd|O4K@S&q}f?)sy2BO*P|G2|tO(;Da7ff89Q|neQQ+A^4Z*X`u98aiWT1t(Qgty znEg6robo2fnwM)Mfcmlr{i_4C%O@YCB-k9!y*zq-DM(|MOa5f!f`&MRcCH~TYnaXH zq;1^vuP?+L83)#)EyWI~=eKUP{HRMeK`KgM9+^68=4&Of4JeTk(SAES6nX-1>6Rc}!D20v&fv{TS`)mhS z{%gd&td7QdoEjItYMcJ(D)Yp3(L8aa3qh>gaEGrrw?>hhBT%0mIB2Ti-ZgsRB$mb8 zJ{nS&{W;5M*=Tmv=A6y2`@Ig=Q>#T5j`zv}AEP4FSxr^eunR&}SJhy!9GoH72)s0M zAxSoRN8BHum%M0P+7&w6C}Zo}=doo;^Wro4TKH6&E1kQi#Z~gcQ@5l{Y&1k;p+Ygr z!w&CihnrASaI7!{Z6R(#=L-rmN^urh_8ea?E5~*&@D)>gI+j?P>~^~i`L;#p-j34U z{#lp(cQU4#RwV|FM(NaiyiUf40D*UyOokD;4;!tquIXD!^O+`7KL@_iApb>WBW?g;}XZJdie~=a67ZDDc?3 zDHBuyg`v*0D7^&&@DWj!LO(T$>}D63uTu|JktJbk$WXiGbV1&T;ojUQoS|n8dX_L< zAkg#Bi6_)o&u_bv)RK>kkkixCCa<{COmM_}|T zI%`Rx1k`#YcpW1vpN|^5YhbjKHIX~{kj zoi=m>EY()zmAoxB;QY<4n+5RTiwJo?agjwQh4^$MXMRk#Fxu-iq9B9L9I7G2S?ow4 z?Jh-g_0v@feQy9y2e86|8|Qn>8sUA#6hSpPY>H-jpj)#qwl)S{UvlRdZH1mz`uW#l 
zF2vN*{d`A|6{0cqKd{O>6E2Ts`^xz^P$SJUuH)vqIX(@@__PTuz=3(07&1Zw_6uoj z?l23@m?*7*>%SI93`LxSpfx64_0(I8p@$J=wACJVjV%@}kaD1no_sOxqqc=9Uk<|6 z(qzyKtwTpbspW;OAULlUW7Zy9g%o1=Tb^tzTuTn}XLyd9l2J+HVUo-|)B?sDG-{5N z$K$$n%e<3MKfHF>`H@ffuDjkMw1$x^H_pVWCTccCVmjZLx(dtPL|hC@`22N}iE!v= zzI2^SqP)oQ@Vtqi)TR2%)&om@Kf;XbfG@G;lW>m6Y;F48JTsvtz@c(k2@SUG)VQo6 zSxH6v&T?WY#$lEDoj*f`il}81a4MV3ej`tveVe+x+)pgdccwLVwcIi#8*wQA2oQgj zlsC*cN9pvr^_<|FsQhA|@5YUo`?*g$^InPw7VH3dqqhzy3EqUBvW{%B$CfmdR9W8A zxwd^9)S1e^FW>$nF;w1SE+syaDr-q4&oKb^7C5c{TQR?;A9k0z^gsbs#oG#i){jkW zZ-vG4`KugDsfBxX_ADnO(3X%*hR1>KkrREgF7(>XPpc=PVVuVz5Jm!d6pg{XOM2N_wvV7e>9DktDrmhZrkpFJdl4yr?HKlrpNr7wCD zF>qehH#Ba=PO8Y<-lHG+(}`Y=xLsA0MMz`9GQ z&)Z2PavI47Gqy7y$rQvbw`=NuP%V&xvxZhze3M`^67{aa9Pr@DY}aT?6nG9QKGuY5 zdMlJ?_vYnag=L}X@87ujO^XLXlGLin8bj>pL?g{-S%is_8gopD2*Bi;@o>TURn}2h z^x`l5iX8SsGdKB*jXr-pwX8rv2_5SmJa`TM@*XlXbY@YyR;;=|+AxO3(1kSufDf60 zL;wiOOuAf{?Gly!5eKyvrWRt1S;6G96FKrTvdJb#nMaoklk zENoQ0-LLlhT2^L;`)-c~)NMng2A@cKhADocv+)NoW!msC@!%yAFX4^*Yp2QwA-z7% zz`QOr?in0b_o+5L#1uYgI~ic+h!wzlP9({WV#0HLDw6Onne&N(F71fSde`^u=%U;Y zcZJn&ip0i5oljLvm%ngi_<4jbGNKV!lUzg@keU#{ue=jzSW4)OLgn6*{O0q$!EBV= zhT?$fTCJg|uIj?b_an`q>V8!YI6!bW)sy^b*eD^>L~^*|0hsghDU&faO+$R7?z)!02M4Q>JK{eO=BcVVi4LG2=&5Z`=i$ z+u};CUT3C;PM>g};?fu-VH*6jZp8hLcyYqzRzu(0(#wcvAH-8$qE((*^xRXg#hmnb zy~g0zJ-do$Yl$zEWc{s}Zb?p9v+iZ<(X%8o5TX-H0sfPGU@6F$Lty?ONZLsL+u&jGXt=t_m-abv1c%qKLN%k`yyHwxnz_$-iUnmry z8`q|KJV$R_cqzA?x`n~fi%|`8sM07{RfZ;q`7S{;G2YV@n?K(4KtQFB>Um~CU3w~8 z!&|QXZ{%H3D)waxtYw3C-IA#@x1kckCdbw2Pc94LMi+XVWrB)*Vs689N0aPfB8fQs3XuzM z3yK%EPCgbgfhJyFZ>kx>CuSy{7%>2R95=e@U@UtbiNa)HJd|%XpI{irY4~w^TE+7~ zG>pQ`7QK0W-y}x3;4d>NQ)Ty`d)`j$)}KOV11Cc#p4Nxee=;g~54Wf~*%E0-aZ{)wqzw!GoSD%B6tTt4*6<7^r5du9#jn-u5Zq zKbBTXeAGbv4T+xAYnw6ar}FoG*tUExh1R-AK&y|Z0yo$u$h8WqYq29}K*^Kf;gO}_ zRTK6DyQ}emV^R3^bH(!!Mx~HIkFO{9VWTPChb{}&Fi<|fVmWStktiTtw>uIHA!mPY_3WQerB@bX)&>6MkWYLYC_sP$!U#mV-`+)fy!O5qdXK?y+@YnQx{hPnN{Wq`^)Qgr` z?mwr2Rdb+ekl|Y{DfZVI`f)nt^E2nE-609RrJy})os({H?{{N&`nf-0# 
zD*?MMmd5c*egAh+m;c_p$rZh%V()&e^Y2cqT)Cpv^5WkA8VwvZZU2&Al+m$GO9JI{ zx!L8BLgS;HYvLHIiZLjH&M~PZm&eW^kXo7TgMVkUrXfkWBamt{_P81XCc)8G5)uE zaH;C$@a~_t)!$fU!uY#pwBaSg|N92AbPrZY*M1*uJ~(!&Ij_>Lh^p54wPPWdslJlSAbyP%7 z{}OU2JZt<1Fn3Sk_sP&7K&F0W8{xqpz%QcxXO~Ip-(+hd9?U3wSg(Ip^FTgA@~4mm z^`}Zrjq#ZP#m#}z*-qQ`4{I9oTftm%i_l>9*?PYj_THHeQQD=`ERShR-#iRGVPr}k znh*rw#g=WX$-Sj?6RRI?rYCsmOJQKzHzCS9f0iL|+sL%rv*93pG=unCH<@|Dg>;k~ z#n}xfrE}qkFg2liQtFF(u}5UWU(~H*2AGIM(kYvarr)BlhKB&d!LD}JQVJ}g06ifp zGETp){;8otUtXMhAhFni=J}nb1dMlB%BD~$BmRJ|NWhIcxA1~l#dX#!^^kG91RMqC zp?Ikh69oK;Ib5s4)w`A76%9T1?&sqY!^yh94CBD>6P?HCo=xNE>hCQ(4q4`tBHL`bv^G ze3iAr@K}mOyk9e67*JCG^yM+9+Z_^`z~GOS^$5bchf8J7FVsW3OwaAYYlBm|GSUqhjr3|xYX|O((|f;+|ksv2X{oe?$i~;iHN{AP`Q(g z*S%(`2d%=IDHj2Ht}`7SZDv8Z2T#;(Fd+ghd^rk28@IQMn#*k3@{61v>vUpVcqu4< zLwS55<48`_e0MK&Q}YIZ!VpvY0krH7m(v963v=9XL%w5O(Y?&balY>+2f@n=;+)D1 z9~}T`rwJS=|NY1g&L>c+s+(fuXY#X8d*i9I7A{IJr+XjagWhQr57D{6 zS>k@bqfO$kiLcxPn|cfc1zyI6)VDsol7CQK zabzCa{Y4C2dX+h6RaL}%-ypw5nGdKpXh=-U5e*m0(fWut?fCI3_=w21`(2d%J{1ey zwW!CRry5+?%}-qJGv*Bx%4-7m^-L;jP?KZ;{B@EbY`Lu~MP5^ZBS^K4;TtxR;(+rbPc9xX$&K0?dRG@F_3{jBafmy^&5th{ML6A_XWq04y-*;z z)(v_5qhX#N6@AksLdt#2J$9dT12oHPj_f0qL7kwdzMaLEamMKTSS0Zd(?G&|LQt^~ z&lPi>&2g;?#gXB&3fYW+c>z~+0_&2`aA}iy*TjJ~!!}*o)?u$8iKG|9ehS@3x zI>;yK>Bb;6d3(O*k{5`K3Ynxlnh1kPsQDwdvOh+^*8{)s9^P*EM5oZwG77KJ#hUkb zk`5?=yPpOuMX(`FCetK*Z^C+)Q_f^qaf{4F?KEk~PJ^I5Y_2#cazGN~1_cqML$xj$ zSs%unztRtLNcEaDGYyw2HR?gy<2j{#)o}e)q;QrhRmSF_!gEBf-V|E;2bA?aQccN= zsobhNb+^fcW+>PMnc-X`xaKI55Q%o8!5X)EhWa)?7@Hs(cN4hl~3C~ob!6Gw*R@auK%3TdVQ-;r=_G8YQ$1zU=?R~fe5~_%q zHkC<$w9A-v6MQr%olu^}oMuPuOP$eK#0Sqq<+`k<^I!I#Y{RBPV3pxgn6sLMcS5Qq zNT(#YTxR{0c-AcQ8Kruf>zU*Hx(rmNNDEuc-ToP%Z$e0+Pt>Us;8px>tG86jT`{W7 zT^c@Y`*1bF)Ip9>KHxa1#8jjbg4L7penJa?#S>s|4-&*Rmnr><`B4<`YbyDQ+RzKw zS++SmuGEv68ADoKmxig~Hj`Ft;8s4nT}9?mzhN`vWLFjnb)5LIRq&b`ztsR)klLnaqK6oov1tPLyTF1qghcq{mg2he zLsJ0UE{|eaX+X$WKgH10Kn<+)gr@49jaX zFEFytN|~%W*Hcopmp3WH+|09x=S+U&*kmK!9aYdAZa;Rpt0V7gqtE8Hm}x4@0GA_i zgfJjNAZNh%!XHUO1)c@1M3cOjzPZP%BGZ%Bh!h8w}kv 
zG$|d;#~#vFEQx;F$Gm3racg^%-A4?1G4X<_+llqBx0p16iwli-{@Kovbtm9gTNIHg zfOZ{gjWq6CZTbK~s5~0W>O^!jj#>BCF9+&lcYBCfb$R)^iw~Q3M5=^^&<`@si3s1Y zK7t_KXa7oD;h$ERRz2KJz3kG<&Bfuu-dgc9e=f;SdN?kW)@1?{jy|wDB}e5NbbtvO zj)PA6T3aK2*zDHJ3;w>yv(4DpG!uEq;;JKLsHDF9JG!snH5FF;=d@{>Z`b5*8IQ0% zCxoKZ-EfpJPv+v@g_dVLIcbJv)*Hcbs|}>Z{ohJ-J$sLkd0P$`RYzVBEv<`v#wS#|R`1cNY9eB5jO;B*GG_3m4T= zCjQoCN9ia)tZl>{cd{H2w5vM7k>_vwgL&3ECTRk?GnX&dA2rpP$olJmu^n6i+>BgM z{Ep_56#nI>v&Z`UwvMG?s82>W4KJIZr{256acpEo5_Mh%nekV_0T zh?om&xC3C)wSOcx*xc&#*{AQRh)9hoETtx^t{M)_$`RfPv8!DPoKNNB0}syEP>OXj zF^Z^4V<78v#R7^aL}w`S)L{TGJSHovOW8K`R}sCV?trF-{ncFr)st3N)4UcLQv5zo zf)>UHlNZ*|Oz7N{+5|Z)0%D+6)zA3DjYJjYPkk3;ygi>QFnxz`lKu{v4SQ;Ur*qtm zYw7s-B|K@kbSByAh%UZcC!>@*EZM0b89i{T|>8{_Upc+KuwE$a#dB*Xwm*vq0%gC|@wdQuLnjqQ!LEF4waUiP116 zg~{8(*WLoRmPHIPB7zX0vH|BIp@=5hnLXtK0FiplxZ=0(y4fK>4NrbA>^oW}(d%<) zuSIu3D{>ZKZBa>2cXUa~4dc)@mG)S34(^ zWZ|USd<4F(BQ@TvZPjnGn$KcrFV8+z ziqjV(<&`5|XVC5>2#v*xbjK6u3w|gXD=jW)X`Rf1GiH?8irVmRU~2Nuzs~E=f6%$V zhgUN#w2QVPJs_VR*f?2FeUwnYQ-Ieq7TyzB_^{Cf-7si)NoU?&vkhf-*BxC8-5g^R zuE5KNEtJYt;5!GFp_%1$;Aj%#QA9j8PzrhmU){8y zz3Y+N|BTkD{~wOv{kM9nR8s!~0Dpb=qwLmisK|?z)SZioADz7syRi>qjGuQ1UhZ9e zh-G`q^wh`y4`3g^4-&N($b-HaNc`Q>C z=a{tk_r8Ss{i!dn1ivNAd=39ab116wSLng7O$W8VhVBkl{=Mfiw;!vSKS%R^s{W3> z&MNz=f_!>;_x4w(oL#fO4{yHt*ZYazE{4BvKhp}=pa03#r5?tT`dD)6uj;?|{J#xC z3j6GqdP#4E=kI@x(ERURfAW3v==xdiqI%rRxoueMv%ih+@!z`sF8`n}Tz%T><_T zV-e_{u)d`!A*KRQA^aajWB+G==3leC|IPG{|4=%|fANqkGgV8!-LuV{759l#aT)Kz zCI)%Xze0oxDQn{wQu*MNDo*UhEpsi0^~ST}-e>g|;=hj^9vl_dz3ms5xz>FA3#z1hDFJbAyNs)!&$s!X`%Dgfn-q55BG~UuM`{nv-7X#ECR8^1T6C zh$1p}%H1OUP2g}zC2)^EnYBXCg@Q4Cs!uRP>~yG-+EhcKnoXOtgMevX7`%>7K#>=g ztlN{BP;RUOHa^td6OTRH9Y;2`N9Ae$+)^!HI0u?mQj8Iw=>iYn(s!r-r+Sp%PrpVD`zQtEmnY`8+#T}LR{;h-_W`&Ns<1+ zB`nYMMm@YJSLMwEjqboC7p`?(W^IVM%$#hOo!D@(<85JLc66n#0to}fUC+;-r5+lQYf)s}D_8VW1e|F-er{Lc^R9t;l5kK( z(C*d;jcHh*mcO{-E@TS9k~}hxdKov9DMW$Z_oyn-&*hXQG%w|q*iXz+b{9(I5Vn;z z%UJQ1v2wo(N?r~=-A5V}K{m2v5>iUR{8Fg=+rrrW!!6;z3mZa73=#+ZalIr$#V 
zQXt>Uyn?9b0lvjR2NeRz$Sr!AD}}V;=9hGS*Q9qk#O<_vd1#VnKR?eq5vKapB9y6; zUgVwo+ghg@U!nimUmaf>Eh!WPtRY?W=BK;+{^@&Ex3NeeAl8Yr+$j_A}rGtj+1rt8qsF1(mx%11zy#>yQ+n}#B1VxlzB zCGsRNG!=U4->a8FqwX@Pvu?E5>01js&P$Y%@DfdCm=qKy?+JC!e458hAB>hrTSLYs z6gdhMYtRw?em;{sy6@ey=2K7zTHo>y6+yV}6gX!~AL@<98WM*v*T(jL9`f+gg1v8T zN2=vLoT$F#H~n!eerWV<(LF@XBgvroLk~m)CK5mdXY@VIIHj1X)}`vEzhe@dF=}nbKj~-5LF=2xr268!>EV)R%s|(L z8C#&XM8?LTGUxQn#&fWVAr(s)wR}Q9ZFO#TDk}boir7ACRIa&UOSExxw>%FrCcl7+ zKQW=71Lz!7g{safZV2_s`jrU_zSTe@z1p&(o-uTvr*M-X(f+2oQy>sGrzQa!FIh|V z^l6=}+Zx~dF?dg{X0}Kfm8MNh&t0T#E)x`~3ybqg0&xQf)W}c$K{g$C&!N_GIt3;qYf+Sn?9vZ|4cPjwEw(=;J$dT>sN($#bQ}MzWThlq zAGb}@ZKk?w^u42cqkIotGh;L1vrLU;)6Yhf?7Fdc??1s=ZFG)WsB`pAr6cL*62YCR z+5&tryM0zxRe97~r75UGDWg2{Aj(*V1o+ZUsGMjr)^|ejz4yI=h)r>;N(P?l228z!Y=-u~xnm;_aqd3kwx%cprmSH^Vsh6jR zdM4?0i?A-p$%GiF%TdH_62=*;?Hr#wY(m`s1W@t#hSNHW@1ho7gu5d$vbQ$6n%Ln` zpX%ySzH`W+4eMflFK|kztqLlQoUTn1HZFB^iRVL-%^?Z;V~BF89RW*VS%N#iwkL@` z|4X^~Z?R}u5D(nf-19{D;e*8M`!i#R-6P8ozLoHY=F7Rz``{T8hvHHXfvT#n2L=xb zV+})ydpk2 z*b5o)q=<77nIcOz(kO<2X1CdNpy2Zyg(ujFZ+qMJ^yF%Kp)*D}%W5WA-M#~%vxa7xCy4+%_o$5CKN;tj1dMy3_$<@A+H@x zknaA-B~{<@s|+_jvFmpazY;!fnR+7)ZL>{rEjL$MR(AV|t-Uf7zQw4O6AQ5R`yiF* z{!PlwShtI!x2R^)XvZ)0f$^4zP+^lEr&&2ntohv!gV=MsEFKe!I}>#JAbuhfE^O3E zm$rgM8&ng385ceg}(}S>13@x&j~anBxFA$WT!Ex`nOIXV3^{6Ud|S!_{V44N`ht}{!cP26q0VbJE-(^Hbtw- z{Fez=cNTBTzUKqW{Ut32%^t_~8TY#OZPJn4XOB~KT9%^<{pgy<$M`UhMN+2kQkp_b zG&)5@B4>@H-HH2wKj+4&_{R@V6z*QRjrLt`NZ!lN=rSPQNBKNFRU-v?(G>EVfJ9_< z>4{RQ59k?kpb*-&Hi>bnZ3?RF)n{*A?5dje#0Qllk}|M*;Pn6^(m2E2b*^r2{5-5| zcRpjlHQ#I$Sm-Yeg=vY-av6+lE$vR5RW0f_nmbT2Kj%gQ6Dbq9($jfpT|h9_U}KFH z>B-67xkh9!Nng^T3%k8lG@a)p^H>%d>V*mK7s)^!oQWL%#Wd)4EJ8rL57j3FPV*C; z$cd&Su>xJY##tA2dLFe*fL~cDKQF9@Yig|tD}~-;De*Q*%?q<;Jy2ab{!UBN)sSS< zOxBd}RgzP3WO~kF|8tnJHY4PRmHWeA7q3T9G3DQYBfUW}+19|6k5SGai2Lnw4K>e@ zx&2|uqS5d6e(Wh_-WSCk8%?D4v4nn?hD|&Plr>96j0J4I5bsV9IVt?>0YQrWjwL0< zw5gP#vvt7N=K`-%&yNh94+JBVtQVNx9ZY2m+w)Byy3vE@rzuam1rinvX`4rFy#SSS z1{DNLa#h_DomA37nPH;8-6wSF%FwUK&%AdxF!>z<(FAww-7*BaJvrJW@wz+3Aqktq 
zutx_QZoXkMQcpOTY_4uH<_{y(<{39CCeeMM(#Mc#?MTLt$AH}2s@yRt2&AlY%0dfA z{XFEh4~v~yM`CZf>x6B~`Hr2UMqBsRL#F`EULF!z7}V9(s4P&C74p+mC}&lO%O7E% z#^Md5vim640p7`01(dOxOw9D=sPO zC~%HrMXESZ#1(TW-uL7_SEOY)yBuGu#Ime!2ceRkYYyAF{QSv*cS#RrAGIUYS#*vB zsR~3)8M59mnpFjuFw)V%TC*F}yA1+y2OUyvBlH8U` zL380Ash(z$CRPJiP28_R)KzT~t`BB(d}+k0PsXWadrvG+aSIpf}QzW-)M)|hLq zx6HZLoX`6_cam3KR{WFV+083*n#d@?tL!1a*0^Bq+|wZ~xq9 z`fwtdfBDR|n02r))!1s;Xe8;1+2W0tTX?}J1V7_aEmlwP!BaMk3v;%fg>(M=+O;pw z`O5tH5$(-g%f@Y~!>q0!oJyQOQu@6RMn0II_wrsl88XR_{ybm#4bYGKCgf9;x-RnW zD`~2C^>oEx*AK>zRH-e`b3Zd-g(CiRrRS4=wcGWpl=j5%Z&y*jT)dLMd@Am?-Y%u5 z-VyIM+36o?qdlw>zFbsckT7(9gHE(kFw(JlCCq9T4$cGUC@}LORsP(p>MuiZL~L37 zBr$OfExLaF_lH**Ez19H+An5g>D~Bc;zjm=zXB8s5~I0Pm8#)uQtmdnf9XMVHCi446?*oVP`L zhBJYf2uEZ%M9?6-~_~x7SX=}F~p9$WN5tkcRrRnL#o@If@FRwQLceN zzP(*LqjYm*HUkh}mP7!+O?e+_7vMo7mJ0b34h>ZK4!X9T6(z?Z(bD8^2R=v$bvUt5 zt8$D-6G04V_8zh%S^oe?fgJ$=s=peBLsqAxYfAGpXa7f(W|u+H&XY^`r`77g*;0aD zqE900W$sQrc{uT~wU*+~U-EyhSIwo{KTsI3J>K8F9=iHxSO0KZt&{6nWMD;e0z?Ln zoV4OM?3Tu}*ibJBO(Y*WMvzsiOnVIJ)snPirhRB=)@%~p$kkY#f$Ed!H&Fzg%v zaC*SL&--oM=cg*~N?n7+&if!;kJr2#`V zreRV^Sdmsfpaet%pz;NLP$nG1aRGr^IBNCQzjvX`3H{)yQmh%nMqEbVl++yC<6*CYif1=9;ib~v1NX)k_Dlei5{ zT&OqL#6fA*xf2|)?`8Ko^$7{p71dgr8z1ktHTxsRT)&DMk3OtlMbR7jV(uR=RtTCF zT3C% zUG8O0dS6FyCZ(G;IQ*j&x~|b~jGtNs4k3LO-V+lVO+&zGeSz1`rpwy(8^G@E+f^@Y z=0C-UA>uuilXa_{ctmll+({BR#@FK|?lP#K)5Nz+84R5JTS+! z9Nm|oc5gj)@mrZ;M6Zohp{qPJ2qlIh)2mrZewN)=;EW5WkGqi(Ml{T?) zj>J)rkGa}j`S*k~>r3(pN1V{CcmQcUD#Ct(YFBp)w&1nF;;t&Agw*Gu_vl5Z?#j3w z!NM8A3Cii~d<^y>}NDgrS8JLu5|XAJH(Ag^$DP zeZIaBK4XQ#3gJTDZ^;f{U`69zCP^m>g+uSw!Pe&_ThBv2&MH#%OrXB zR3)_dPfXq0WoBa`@c1b*B>!Gq9Uw*fL3;=(uR(Bo71BoLVQ%DIOv}qD_x+u_EbKR6 zE1jM^`8~dvfp=(Wn*o`6upd+k+seXmlvMZ((hXLWP-l3rVvUrVo0bK}(q-xCb0|~t zgX8=UI2lZ3jqK$rfHO>#P1aN!j9f6`_x%SZB-a!v&Z2Lr<=ROs?vijufBe*0cyWP! 
zLGe_=wyw-g@d=%uUvcB%HV~q~2A zaWxIfy}4u1rZG{7JjxK?%d!$4>2@p}9-;%HdHgcKU_y!btSebE!VO{o?faJ^uHBk4 zy}JWxwk()P54~qZ84ZhF_`^4&eF?<}y8ScAQ=tR#bM>TEad-E6H zh1I>B$(Zs$LxU$>z%uSwL!9~D{;1iCs(`HUOtvp zd_@s^T4V9ELV_ijIobMW4KrF>Dp&2)WPhUs%N53~E77{U{tg3gL3owkJpAZ=t4@FWAU;YOSK3XJAWZUW zmkzovs9N3;9d=>amom(-;VELe;7$HaWx4KFov%Lf_GQ(YAMz$n(bLD-!$f`g+9yG1 zeomK{o3A|~qEOCetJ*GR|7)T@;|R^3o+1c$bmxLYSY3heww{jv5-K(p9rbob@4ZT? z_g$@a(Oin{BshG-bx{o=E1cI1tH2fTzc$hF!6_VV!`_ihSsP?$U>Cmmymndz0sS^=hY! zkBA|M7TI#%Lzs|LL%d6n@@zw9nyS1sxx|HJCo{=Aq%S$aJ3pr_W%yn5E-MB`-V=ok z!5)PvD2ee|L3ljh^ki<49tkWH9L)pw4GXRoeS0WgK`U6?Pq9DUPnK`foixj9j@*6e zk$5Vk+TOyp$i!CXZ(gWmgyd4-3L{5}v!$y6iao&qffo+1*2W+s#Z3@Y!&LNue|kZ? zcM@$L)J#r19ex-f4Glw(Hu@2437L;9auFDMTNHhcew*}}TBe71I9Znu{i=(lFD2Li zc=F*b|6y2nJdW}Js8*O#8y>YQmZ5kyO_>-b}Sx><(Ggf}(M zSFO=*aT*4#@Q$p&H*f%!9uFWSv6pk&_gWPh9kr`-&GZ}a?sf0co#3on7ImDwg4TMz zg^u|ILrP+8rg)GgdYBrD-!NKcD;wcb;!hfP!;zHG=iY8zB>7U*t8Lfz07{pD>oO@#>lO6V+}}#&;Y70e0F!G1MEIoOc`! z@gTK0B)c{*DM|=bs^jC-O9}#ff5XpSiY-cgCSIP%%cRlIGj>&(pcLTqOheGu(D|&V z3}1^SKU(#9bgLeZKHkmAetbNf3M;broh0)Ep#683_TGzQ0TvR4tqD`nDf}N@lbrlq z)2F6we8E*XvAzUaNqh)IG9i|L{dJ~j)vSTrGMr|9(!22O#qkn$3A4idT0Q;fR>4^# z+K}{%w5y+3{(2w8H_DL;1HTyZmK3SnAxA$2DI&j2%q{x)*bZ zvJ@5_2=w+!O%aQSMc{`52o;@ENAre%OazT6ZM(p}5P%QA7b>B~Yi@!Yaa9(;Eg5{E z@{wcy_{%MQT5(NJw`m(h#_G41g~Dts$C)d&IK2is6YJJ)k4D3GqPm~~+XHD`C50cy zsck44s1Zbl#2X)Uj~OAMQ=R8Z&tq^xwOO6>nz=mNNNMbT#7an$wJ6fgKCeAdmc`SP zl2JdF?smVBgT4Wm5I8IzZ2&mnE{!2P&Ae%-dd>rv^smi{mlzKz9Q3XW%ivJP^T()2 z3LO3DS970R4O0XtKJEORDPt7Tg{8Nomr281M9(Yvmxv$9NzQ)=v46|Bzm~5>2o~wx z$5r2d1#~k7l?%P^N$gQ<^mwvti`&QQlkvHAEd88jBEavndu#J<$?`I}W_I^^lmay; z>X5z~37uC39x$mow{mkd}tlZ;udV1j81csa}LhCb6LW!3cS&y;~ zYf5AV>|A1Wcfu)X+uWW-BO{l;ho5=g$x)CPrd2yRl@Ea!;-Fu|axRsXvzCA-LcQi= znBM;PQ!GQ7-~EjWgBkplXTrhS#mB*@y`-mHfjK)m(#xsT-wQk1_w2PSB8E1Lpm|lDC2(3C z#yV!_x~A0k=yhIzU!Big$m)n3h-X+W0+NS7IgNWK4v6K<@vr>&!d>nz(HPj|Iy9cWlFbpgM@R=nXVFcosE7kdm135qGxn8i zG-?zqzGaY?P0fF7t}R)$&pfakxvd~Lsv>v3FUuY#l%=R136H<2X)21psJb}v4cUGuYf=*u 
z0BU{HsWoIcF4QTfPS5>SFr_<+Ml39?%z*fes+`5(O>Uee#PJxi?mx7MFB-n>UcySH-B|aG4&ADT>#q+| zg{tQDJNGh~xe<4iU20vR{U6)OFL4lv^z!1pYJ_~FpTk=nynbiwP;l|lfC=>4q@tlX z%OSh4_QwTtvwRQ3pqyfm@Y@zI9n44X1Qz~4^*H))ys<3IOTaJ(j(KzsJ6&F~5U*#L zC&K2@kocgs>L#5=oZLCAoAy^|)xUI<10~!T@Gyf=MgmdA1WRQ~wJ_>%(*tkFxUC2nJ2BMi@P4-D zejdBt*genie%j-pbcR)s9}_?^3QErYhDZNy6c;e6)_xN8UcooG*P>j|{YOCPbjOPa z2+v6`k&zs6sbIF%3Y1TQY4>SP;$pZtK{%Y3qiYB*;ml*>sLzeA+ntIsqqI57QCK(i>Ro+z0x!~LURH@KBH*5*2vs;&0v`qE?nY(CG@u=^%O2gvY%Xt zz7T&Q`uSJg_mI?%v&&|jMEOJFBEQe6LO+i!AK2di%cb=`SbhaHKHGMuxC@eVDpa_pQkcdPxWn!25iPF5W;a59i`Arv{t9_T=r36QUcy z`v9iakL_-BIWnWTsXEe@Y(>7}E&18o3LsSPE|pLlDS@8|Rt;lO{RM>$;K}7FA5`6Y ze^(6$*KTdPl*YypmaW^YLH=@?!41^*OUbcC1AEtA& z6L4_>IMna4CAVl#vB*O=A)r9B&3tcDP+gbF;2qUC-hs`qG!+4|#<)T!Wztfh82fTK zVL>32HzU8){q0>ZH#*U|=x4Fc-M5((PJ<403>oB!@UuBel-KkniYsaHj;DRq@> ztQuUE|N3&ZD&H)*cc-_ZwDX3@jOqdzTlf!;G)uy12j6cjb_=EwwEg@v;t;zIdj0PM z`s7P36M@(r_J%i-F0C$PZ})eevK|&@1ulC&-CTesvc>TLp1r?8vsM#u!yXCQ`TAs) zF~6a3SqES0?EAHA-tHEArv;OKe|XX%B3~++C%m8xkWOhmjefjYZS7qL7V`e-0|!ILVQ;dzy5d|?U8;^=^jpFc0?Fy;4_ zJdFs8f3ip6gd=ZHC;7hNc~EvnB9t#C$mnhtIQpYDZ9@PGP#&DM-2HHy%U*V2N;oJB zYi{wrp*$VFonrvODH10Ks;5*%)Nl1PcM3wrPDsp?vF5|JW>3 zNyF&JLJ7eE^{IleH+r213VDEolJ(On!aH%OD(HwX7jXN%|p?Truf}g zBy~C-m$KECfN(uX!~7t8)SGs*5^iy2(cOTsj6+7j{1)ua*KuNY4%!mU%-fMy5OjE zJP?GyQP!I)!b-m`bM}zG>pi)wkpgh*x0gv2q0Sq-`g^~RsMrG<(5W}9!m9bjYK6kE zbG_o%N0k<}?#HfK&g(YVA<+}avjMQ8hxfW?M-shByOBBZ`;D)OL(lWa8M>y!C*owe zM{9KE`UNu+tMdSrGR4hC-?Cdz9>|cPYM6p=v!Z$=_|%GOCAl*Mv)AxiOKT4S?lkny zipu_lT10JBS!L(RVyBj(lrFrHoQFb#mJ-a@t_LN zw;Jw&ge@lXpsT!GMNx1gHuqWYXJu;U^*QDR%w_kAtLsYxVr`Rk0AjOcS|9ryd(svYF`xeWY^+1AlD0zb zcA}0hAO2eyvXm<{7oPLFa$T6ekNE z(MQ>NgSvJMF3bG&&8X|AU6}ml1Z1#T@VcK7$E)*UT{)ZG1r@@^P_%rvadQT> z-G(f*jD&&h>G<(9{k>h1bK5HGSp9m^2k^xQ+kIx+qq=h``_JzNvCnrgcWrih&P!Hr z)YJ=pN+HXZbQjm-#{-D=L;;-bx}@#>JGM2!lgkNa_02s=Nh%U4S0$zlD@cQ@jOW;^O~IhdHZoQxV)P}^#El@G!*#p*X| zGGj=H)E4k)Zo4XKgK85su(om?jH?VmcTfGNKKJq)Wq2^21v-G0(qP@O&PxIK5iYlg zXt}9NfLr-cHvp7KCQJdR{!?2!u**Ot84qe+&`8vowsjxB8=u9ahs=P{%lJ%Al(5I8 
z?SY?PhK4DmT9MB6&0qA^$xx``lpSW(xddEa8H;l%}H!I3N+-r2R=#ht_*tYis|A zgw@P-x$g`1s?_ay)&iX#o??aWn5z11(IWbQnE1zJa*xX#;1WDMsk>S>Y5hpDS{p6^ zF)a<=mwSS@z`72g(~n&@IOKks40CCSrtE5JyKnP}go~R*P}I;MJASxfJ+$?ey@~(z$AfC)l*tI(p*c8MuP@gl&dQ{aA&z8_({^jP< zp>JSBpIcu~)+4^rYq`kh^EyK@%v7n`8K6D^pH^!fqH}5u443GA>F=S+M!U55rCo6` z1W|7=XE}lHb>01eWvN~qlyH(ZL~)11-f(%{oRq3YwBpq3mR519pzvc1=CtYptKd-A z8lC{2w+W@N8ocvCs=jO zhqCX#L3>>o=H#X$8VgjsF<{v6MZX~I!0MQ0T5Y>}PiUAf2lg}bRG!~h_3>)d;w#XW|Z6mCgPfa`Rl&usF z4#hq5@eD~E56XfQ?%j4o%E!y|Su!7OBxz`WxmB;!C@B!>f9_zqH25H5ND#(qPdW*} z7xk{?qd&VVhR+;e1k~iSNH`zwYr1Y5XK=;ja*+b_1@fj>J*xkOBuom6wN!A+DrDc~X-}%^$ zMn6Ib@M-)-z16gDrD!9x%ZE$6uQ9Q}eLA)G5xhe`(dA}&HZL7%mGTZ{cMPrVQ(h zkN2hWd;eOJ9pzcXE+z8j{K~zKB0M7lek9?zxlvs9*o&P#4&=j^j^iMj!_O+OW>!{q zp_kZJoI2=nWzS5tR89ICbM~V~JRfP8-wWA>tLRI_)9gcc@o+ewn2WG$KNCA@sHx6T zURYnmW>}~v%cCUJH++)Dk1Qgw`C zzLFPIKP?q96U(&X%`53Dwt%pdMN0H>YgNe zDg7|&5h;qiHr&E>Bi{zZWy@y}kLF}1a;~2;Sd`_0YA7!ozg7wrvq{fzND2%C+saBt z0P+nJ5GPs*%so-Eo^$v|HthIy!DHj{uO`GVc@SUXa@{B%_Y~MB5kDb zDeM@OW4}UG^NeV}&T)}6ycl86JP(b-5fdN~^WJ@}DDoqaW(qknY4|3jAez8adV2hh zma_UiF=`$F7m>7gmF4hKVY_aSxl^h~Y-hLUo>`}a)X+z_RT*8c7+!bF(&#)$RLNY@JN?ihz~F_&vUd#*nWJcKj?MT2(?X)F*qqdmsqs3?e8B zSpbpOjO~PiaJs3E(m}`#jJckRAM?AfvaYKqm(D66rkqLDRoFz4XvZ>RRFP zpQe+4=x@MN+Ugf_BSSxEgbcq;$1tA6ps(<%HfR6R)NMJv`ko948oqmbIvi2;DoU)w zSH^ge>K^s8Kh1)_jmuehO8o5akKXtF@zt+iyzmEJsfk~>4F8TrDsvyLe>-DM_?#5- zuoS=IekiH6`+@6=m#j}Xl>RiOdhY%PypEeQF0$SFNy;D;e4s`Vpk^N$Hv8%DFU^Xj zZ$Ga!{@l;3l@O^#hE`b`^B^rgD8s2z^%}xW zu0IXH|EnQl-2P_`>z`iyhXg(A9eP{={?A`u=zoF~8ieLC?F~(i&3D|Y+#&s~NVo-d zJHK*C63j(JA8!D_Rln)5UOxuj)mb)XXq`u`q<@iR>uu}^T+*Je{p^Ngnay9o6+-@y-vADmU+J8tNc%~f30rUN25B$b#0LnK;Wf#Qm)H-cunPQKuPGI=14I~c z`62-CxSsP*7C(sc@PunuHQi1>U+5zV%tq}qvC3SptZv=M-}ryx7XHC65W#=t9jOZ$%Fh|7 zU)Jd!GLD{P-5rq82qvyK0Y&EOWje3YXJh7o^c?mXqFZT$0GUd0WfE9db43jmJ>?be zU8mIbDfThx_%3@Bu48m~`=>Vu&oUimKkz|D?6JJxbTEJ{S?Hc@T0NVyx58rQY2p(h z2MZnAvv+ngD&ueI-2f2YJnG8IW2zFRzHH9acP2TSg4&y?+tsS8O?K`ADG2Q@BTI-* 
z%@8nS%Zs|qPB#cm^i{|EMyoIWn+hQ%g0yU${Ub&NwvB?*geZ`PvRn0r9wZfZp>fDI z!=53^OVgVUFvlm;udp+b>Gat}&vm#b+2gf*^61U|2M*M;#SVB&KwjSy6dqxWA#FkZ zGF`4$2@?L?CueTltXNrC&axMGyHH*{tTe$UC32CU=6iqG()+(wQ~g_M71E;MX?e}l zGyJRH06!gM9nu)|{NU)}weqXqfK@~mQQaRT^S^obKj`Ft(LXq1MWS-gweN>uwg>l~ zy%L#p9VNFw;55jn-+Luik*xRt9#@yueE9&!lqy5H2u!>q2jz+P#}G{F6fVsO>aFI- zsKey$Ei%chl}$e&3Gc1+Hw_EC5#dSeNPs?-)>8wwum0;s5ZrgH?$KFf2a9MmS7y8u zp(zR{Wx8lkYZF$eOgl|t32+f?n{>b9y<4aoAss-u%=5Efy9YV~X~ujPW-n35mi&~x z>~$lMEcCNrl%~WFxw?k)y9YXvM*(f)d48 z%WlKTVdM~&qhHJ~L&VNvWfZkV%K&1_)|Seijqx9%KWe&P?MCNJQ0>&<%=>wPKOKz- zbfP$->2Nh=i{H-5a7X6C5w%_rzRn`G>U_F2@9=*W%?3QXJwgcFd!u4*_36zPm*q09 zeA2z(19;P%Q{ME$&$i$P%tuZdGo8P5sCHaf;+B&v{esS$doKdd^Xc{z3j=k`q6TWTg`j04*EC>!{Qx0L0}12=p&qhSzW! zyC>g@7#iyB z&#WA3b*UOwmiE>d*pLla(8D}`+%i%5{yfjg=T1Bltwpa4N0PpYZxt=eH^j|Lr1@%b zdHuEkU#w;y&-Td+iJr$_J$!*VC-wsG<9sv~wR{R=+Oe_I@j}+i|27R+$xY-%S2Xs2 z3&_6pMPvyHjU@9EV9m~KBew+(YJ1(IQUj!IaN&+x1tS-w2>Y`co}(AL=R!=z%lVe4 zPpL7_7>+C}Z;LW-dEHc(&46e&Brbgf zheHLp9uHw@ZcN?C30p^$i!I5rSs1Jub(}M_2}n;uugXowWlcZG5QG!C(^3m;hDVWu zW?||Cmd>!hFwy+~h!h4-f4>K@Ss3J|x195JWy_57S$1zj121#t0=o?eU#%mP#(9%S z`PHjU`YZ#`G)KS9eEtLUC{?MKoi<&3YR~QOd}pC^3+yW0q*6$L_wFnrc94qR{W~50 z0nxtNnLnx1Etk*ID+{pK-uI?D6x{903htW34fW&O!wq$CrFbT!;eKL=)p3Vd)-eU+ zqg=Xx$N3J9v(>GPWN236btXGk&>A>mm7|xQ{xL>+_Q9%j!|h(&47+2Meuz4Q{vbS$JhE3YR9QsV zX4}KuJd0yq%$!&cvDTY60Lq$`O$sOd%9=ff6wb=FNW3_sr%TYTCFu0SR3JmGCzP&N zpF5~rhAIr?1z+~uorpaD;rRLX{LRd_fq9H00xN}_JYt#E4?N=W1pLN_`07pK zzf8x{(vZ02+*iiCo?LdN%M0oLb5Flmv2UOKL-R>?zs&3@l)n$WO*gR{w=&fwvBOy5 zniy^-979^K04hm9QB`t~sHP4oMJ^pz6OG$Dm>$+O;>ym3sksv+Z@chlRU}iMAVD)_ zT?^^X^Tx9A3`mZIEy?7eqR>^tpDiGKo!diOiJxitWSTup=uI5}1xB=#^rogM|mpdra}8#~j(?kIcm0 z3o4Id4$IlMlxpZKWnGjY#zchK;QE53Xc*Kd4i5_O(fvyh{09!1OHA%_bJq_reCx_0 zulxAima@PAx+X!$JLhc7Z`g=Hdf&zYXmCe#)zq6G+_TNLploSwMbU)O%S?qiMirBu z?^@HCUh>75KhNI)4hd=b7Sw_@3#to-1@TnlJBsL`BuxKZ6Nui_Gx>TVGh7*w8=)Zz zo4ejAETcIrLVu2z6)G!D$%|u#5a0;=YjlXfhaJi-A2Waa_u`AbUeMzC0{g)F@Y&0D*%kBXw$-xt2)$redqI|tP 
zGWALV<}zX`;x3Bc07}XkG<_C9-Lt*!?1bnMPXygjB6g7V-uAZb|aCx8_w1ppBL`*Y%m zmHr2xQ@{k#b??t8{)6~c=+7Se`Nx8Pi8RO09Pq>HjHX=%0Ibi(aJ&ow)?Aik=W{iatnn#$xPyT{Y(qMVN%p?MYE_{WN5LIA*s2vY(tN=hIr~N?Wn!& z3f8)E>%&BD&*4tUmfjyVQNoesL9a>b zib#K@tXCGfU1N$*FAk?lxkGpZ43#>1}f2fe;$jPxsvH~B?_78pq zPg<2`Zg>R-cUGu~1UR}$t-lz;R2Dzy&@mWo_eL{FRmw(W;;_x=RX9v@!+?2==E}d& zeIRQXk1lIa9H;3<7sZw1vJyU4yWYyXQn?yL@q`&qt=>1Rg4?{>jar!=h|y9Y#gXqh zhWLHI4_u>Js8?zt-Qs@FXg`>g+-t0d)G|sH)B~)Jq0aE`IH_e!!L9i zI9pAYA8?b}361dFRqB0pQJ88!FsD-O`Sub<`v8nQ2t^6oz9wjbEy(-`BkXXLb4 zjwevYC+kszQ)R_ikHDMKGs#zoEm1wZ+O3KpJ&w2*Fi>w}7b3~Rm)P5r;wh-ZTM|d5 zT<+y=5M7keQ;;(s#D%um$h6FOvBDD{&t*3xHTsgv2axG_B36ZIl>)3N`W>AIca`U3 zK92Obfi6+xGjv*hDLS-xa7|)Qp|s<#3N_*LhQ0g%Y0<{AU?60MKa9M+_r8Do#WXk zE7DH6vuU#hJmfFJDay{+A?bzUK}2$0T$Lq``SVaeP+7?dhwS}^p2-vwX)IuYVaK9g zjX9pKH>56rN@sOEIdGd2d&OA~EySO&l*M?bY6DjU% zediPeWdkX`wBs@@n`=@L8v7wZjD*Aw2|4h@CGZnQQSw&K->3$Jm>tknAhiiox{ny7 zX2C`KatDYk0yr)0_lP>rD741-Zf~mlaw)0FXEk?<@X>C@tsNlIcZ+d{pQXP6!ts^m zRrGqx&tdHuL6kpHlIP7&dXNUrJA(uf6l8>?C0S49$gS~aF%>!v7m|etbmQ^yf ze(OW>Lsi`s?fdnaq+lFg2sz<{BCLm!(8)s+KnAzs!zHnBF>zR{FYSxvu2c@$_>95; zg%+bG@)q3pN{VknyiWyrrq8FTJod3_9{_`DmX-o@fTIB^{M&-)YqyYqbWxY|S^mu6 zlofRadW##V4>-X?wX=jrf>|$ zf5CGF<)Q!~Z?05uDm1A6w44U;&k6(BlE%$Tpn{g&j>K~ekq8f;enaeux#E(emR-~X z)7cJ@q)8TIPyA9#u}sOh@=D)Xp(>uRGGe5pXOck{kq9L**Ac*NzA5IF^-dv%FYw7D zZr>5#M42)YIZ_6xsZu;xiRBQH?xF%&7$p}ga9Q;}j>u;7MLb~{HoEFXfU-f0)OgfV zld}<@s{#JT^oS|UV1>J%%P~~;aTq9dJ-PAfDZN?K)Fd8Dn$>!fn@ic0ie}W~4V19$ zVIMHHG|hDQ6F!;2?M7Qtpl?xSdm;L>+auF4SDn_%$<&s<5pR!BiZbHO*skqKVdB$b zDdAYEz+TBT$};j|20%}kEPz8Yj9FT+b*_wyOS-86Qtl?+;^tV05Q93*P`-D_e|I^8 z52WZFz>4Dpd_&84r1zzrG^TLz)ALj+Va6H1x)N=TQ3uk9u|d)ys+C=S;K#MVw3|TWo;7NIeR>(a2&&+vM|+zM=BY+#@k}? 
zgIZ~BGzSV*VHaxR~RaooLt7ess&iRy1m- z$VJ(=P?OLy-k2b~lbE13r!0O#g<724``U)>6?e9U>n@+$q1JbKuGDnjF<;XjIuGKH zc@cyLj=%h8NvWT$3WW^?8pzrfHcWo`p|ScrJJ`7++SB+Od%;%uAOn5Giv5iK5Da$2 ztfiV&6Bn+d=AUN+tE{#SYVT-ii*nsI>E2a9)(idyRO*(>tD)K%x|)B^IMpz_SWaZh z!Y5X$aWycL3=(DxsyqgGQ14%Xl|;D0W^4SgIFGWHwun=X<}tc=H`O(J;KaQGUMaIC zQ}crA)ja7lLXCv#KY#|@VLxytmHc)PA;gum67VJCOPEqj{)PWij~a?fspB%F1&{3C<>KlZw+#Ds|A zb(1XCqDx56;98Z_gh#*%EZzs5aG9NtcXS_*+IU7mR%F!pv0^IeA)jgipe9Y5laAWv zSn`6YY$^Y5U+o2VYEjkI7Yo_gZ^u=AnX*BJEH8C-JWr0)fl*C*&tennnBZ0T*NlU)gfJ9I`2I>G*F20o@dIcxfWlZpsP z0Mvm{8c>%mVGJJ_HD!1EFv^c;uTq@lizCrI<&D3;P=ujvPkm@Ke zMq)(;J1yc*b2#64m=k%R7|0lWvn4)9!UOZlw3ZD|<0L=V$G9-TfTl_;O$QEkR1CHi zvW%7*-zmy2q94n6D03Bn+Uj`UcFdo%_qbj3PKTF`6t8_oD84VPaA{H?xV|Y94k5Sw zq#~t*sASXg2}iCTxh%bS-OomSR!K`SDSjaH;(?+ei#!mTDKNUH3~uT+!Qe zBcOET6AVlL(JPx|!1+oa1?%nbf$WTsQ7l_N2OYE~Us`!N%TIi)%EM}2Y9xJio6|#k znLjHoaHh!e&D46xiNE2#PzGsZMsR-IXQMYno>uxhLJ^_T_~AW{E7Qf>s9W>A3;jo#9Ko zTaj|ujI)LnJ@$j)vO`Qd$xyP80$%|SZ%MPUe{mQwR}7B8RFIM9tXU>SruAy((Fh&$g4Jl55BR$N=lO#RtIwhr(0d+2 zPI8ZY#Hk9?t@q=(;B$I5mG17A8)WhG_~B3&6 zwlR)?%isp(M!~&I9veb%y%#=dQ8#uDU{x;<-uY;T*JAFl-L|SVwq(GVwfK6wG6EEG z_Nd6AM3oszq_7W~4fg{BSIdt+)UShXb)Lu|$VDCA!HNfLm6w{yJK6z?1l>9-58gp5KEq+3EP?r=cgWh)3=R${|=CuJ=tRN)9E;aD8 zaHT&M0Vm#gO~o-dGa3)I>{r9n?(D1N&|yr52Q(^9dNj9X`YqSHW^)}&D7ugGIdD{B zm)*@O6Cz5r<_5VF_{3sjXk;C_U6vd(enrB)JA^5csBOamFt^cgzYlt|=g7k2-VSZLST1)PMI!~mra^}?*W39+#(OJ@O5LhXOJgmvs za4f%cV4`9tGKmYmK&jpUUXH6v9*;dx{vTo6wli#%E)HLljCSoIt|kfqI)F$=#GhJ7QXX1@%Gv6mNmTfxa0xX=NpO%Hb&891*BU0 zvHsN9hnn6(D_yabd-3hlw@H)E{7^pJj|G}_7|dc-gECZnGST&Yh3T2=GBFoNa^^cI6##=n~C zS}Tste`N|J3^MkbiPp}9I~!fQa{{4u6Sm|grztPpZZLemVL^23y@~jSi4|s zG-1!=;n{!{_uE+nnbi90z?17$r8c`=V}r_5x1g-OhE4T?3fa@(D_QvQinGDQ;*tYw zy~-;Nb+p9>Mg@pbG8P60q!3>+^kx6@!}q8L5HP!GvR@ZFK+hE zl&3J4P5XM=8EzhX4cr{SSM|a>eqXDoI&bak-PyTro2BWh-c8W+B;*KrU2diX2~kEmbR%I{dop= zx_TH~P`w{V`9i*{`*L&kMYE}qC(K^DNvYMz`04An*c!h#`@Md_U1m2p^~%NX_@@+! 
z+ia3RGrfIv)X0eJX{QU${xA04GAgbv+xITq-Q8V6;SxyUPM~lN9$X3!kO0A53N1)* zhu{(7BMtsg%Wouy~JxmO)?Tz0UDAoETU_$GFBRzJH5LP zL#E9Ik6P;wQ+b)|(Q3Nh&b~x-|C2yDX7ii}+8OgP{R{(1b=#HE)2W!lN zvN!2v#-~^3NEFD*z1v)&GR3W(E}L6XcHFD%aB~nhZi_S(v{R6r_*%+KmAR(jj$*oy zXw00jWZ?6}tB~t=$AX?ya0Z*yqw_rHSE`pC<;X_pZO`k|I^B2d$d?EF#%@B1s7W zv4PK0{!SO#jlUi$Z@9M7UAF9%lhWAyyo16F>qDV$pB!>3!>S;a?ZKSwZMrcchZ8drv|&tLqgd*#LAEp>l$_(V$C zK%HYA9b3tygP<7iVh1M-q*>m-qKg(P`=T+a%1g4P-t@EC#K?2TbZzXBWb=kY$5q4x zT$@Cld^%LakQ|6trK(4Oj>a+UKJ4xZYA}Wy@i)9?H_2x!z+;t@wyvV3IpUg%e>X^) zLqZ`hkFdmBr|rLY%MfU}H^D0!guCf_CrAMU==muT8y4~m?c|Uy;#IwIg5tAnR8qFJ zmBnPaC3Mv$$mjx#>ar~yO- z&Ds|c`WB#ifVB>l6-q196vzi3;P)XbmT*4_LWcX@Uw0O0s5d&(Ut)ireTMp@`bQ<~&LavP7tz^3^xn?jUYFS)#0TFZ#LYoo>bU22-$ zO;y#&N8CCd@xaNpBNQxg)4otI?S0H}RD(4YDE8*EF{X6b3F19PZ|{DyXohA>ols1M zsm(Q-Xxr;tNHN8Q!@}9IB)QB549SNpIR9O1zi z!8VGQ>YCn{nC02Tu3@0O{ANe&O^h`x{o8$rLZ$0 z&0-f>ZCy4UOtX~mSWHH+`eyLC$F_3Evr8A+RkC##g3xTWu0kietvi0iGoFhv* zx=I&J3JRrhVriozkP-(=>S+z`OMw+Rnhbp8h?P(G%_Y-J#>gJ~pck!tPa(YnV+$U>kR!4?7_5vM) zv;*Nis&&&R{Kmd4=j8Y>2;eq(>S}Qy?62YyXt9?}pY$Dm0s=~UC%C}lU9bPXilebv zcWFD5pWRP@*+K2@Fit@5@^amCS(Ol^%Z+kt~Nc zFa+H_nz`d()A&c1*4Hp0>_$Qd50MPCfAex!R?o6O{shFS&DX#G`^uXJ^I2%G9!%W) z1Pqos8wdQZjrrRqoHbEaUhdx_0iR&eny%lKF@Kv?v@Gm&=IZX*B@eCP=5IUtU2roG z+~;?A^X$P_XcF(m?~<6m&8f{}{U5fHq3PAren|zR2^iR%V0FgWGsN;-tt26+IEI&J z<~1)*ST2y5g$$Lr>}xz2T2u(Y`0JwCTK=!z*x%@f{=aei)n0PEy`f?IzW4BxNoQA$ zwyUYXUjk#Pc82pC^GPEUi^^r52yIg7VldQzloKgLuAvZpE;d`p*jG2@qhShm`wH89 zof%lr!vozYr|=^$=t|?0*M}7$aP)@wG&14T5WFl6>_#zP9W@(O3Q5orRam*rL)>H&}|;Q zB1K%VVKb4KbF(92u}0+qCH!_tC6104l4rs0Vp$#fDM#k$p3eM}CRl?5!6z`vai9U$Z=e*_1(FE?-P(z|nr;CPSjZa6Qt zw&bwk{s_!E^ri-VjbD@J+ylueU}K<2RiJ$93kV@zE$fhx}5NCZZAw4PsPj)!Sa0B&PQ za8b22qW+mZU1cwT!}Iz=+4RY>Z3UX<>JdV(R^mJYNFs@1>!73_N@#n`VY;L)1`b)k z;kjm!0D6Dfw!8_8s(6uyvvD4(q{swOy#cC+3D%xutP$2fJ1k*&^CkDZlf~0>BjJ1A z=Pf5@>slmHPrEgJ;EBv3%5H@j5N0M8bkxMgWbve27<{+!yk*&JUrR6VX{07ZGK2Zn 
z0a8c?>3hI3s1TL`F0%S(_M8b$^lpJpa*VL0xL1Ie=dBa@KJS+CesNp5d5>*sC^pt17J*>!!oRa&e?V+>cqL|0C7m?5mGAujef{(D!xr0Shrr#K_&NrAZq=w`x`}U z?DuCVNP^MEd}dW=ePEaP+aMq9bkLazF(F)axFgj9mlO5GfR3}cV!=|?84H9WbOc}p zxL28Ncx=aX)vb0lZ3Rv5l;^Ur4vpZCUBYT}xO1=b0Yi)BD6@U2wAWz-Zb*?jvmrq> zWX?eVt(RL%%C|~j+1lp z5N%q1?hmmL;`MrrZSwC>!MM1v9lA(dW_@h8-wM9|kh?RqC-Ut=AV|Jr`e8YxXh z%^BN0yLUS;)H?Cn<{{jopb2j560ec!CYT1Qj9M1H{e$_c6sUObzL$#1*j?rg=C=tGi-md>#=6bs23HN5&J9GBB&CXFbkg} zQV46l60SR98CjzkXJc_8mDrdmRBp{{8O@DTtxsUTnzh1b-gKBh$QtAtS1Xh7bl3B7 z^||N@uTHwZof><1?3}iwwFhrp+_R5{BZ=mCB2!F9a$L}U5Z@xo2 z&0Mdwo!_QMZ2N9hzuQM3UtYyASG5X-5pDwfh*8!ny|hKRt&utcTFl_MnC3meIn$4R zkT*eD;(KW0&{s-!w?vm|ceQ0A>s@}T>x%o9VqZGeOWNaUHg?4iv2zDN7?PDQs&hb{ z7XFJb_#4kArWKiNzR#~be`>#;)+{5g$13aZIE*3_2%V!y<#qyBwo7Qmh(~bBgAQ1n7whV-!;7zx*-etxyqK$oH(2hYY zhV;&izD!wlEWR9lyzs(9SyXOY+F@aQY}hAYDrM|$X-xe7&XM_oPudqc9O@AqR+TEz zC)lj-DiHdjz}Q*5s_FDFgNY6h<5qiquARp~FP!~mMkL9trk#GRy54BJTo2gB8mCnp zJ&UD4Tn>zB04Ra@gkmtT^|LMU0W6i+$RxnKr@Uy%K~^S^nbc56yGc2O0vRJ*W5$1a zIhfsO9@RkuX7bLhTHBVM6}w%xUWYD{P=UB&I}Xz(z}$%LLU&otwEM$u$MAexAoIM| zGG81P{=UX#Q{7O%2I8biw-h*7VM2zERNomQCa%vZLzxyVJzITh;wBiI@8cuxb-k7z z1}k`+vM)~s!3w4(KnWOheq$1rj^_lgw2xIFtl)MwgQ3xHRqiidW^9Ch)MTU7)Uu>J zi$71s-x6TgwnK_F&>3KcTp5ZGqbuc$m#kH(bf!bGn66YtvdcRBKyn@F-ZZjwuJz;Z zdHwN@8ldUYNrU4(nOvjn|BKoSvtQ3RtOs{cAGDu#WLzRer>P+30mU;x~m<-Y$5}44cmrSBLiVANg)Vs%@0E&!MSt z-n`wGLnSYOgw@KDqP{!@;X(bAx+L--4D?{kL05(+R$rdyCw~+`oN-t?dcd9{o7|MW zOdF2vSnV|oY4RUd*{z>ApdTzgFO1#=jKOJCd%rK79We>MVqB^4@E1>S&hXQ&XhvIV zJ~Cgbr6>iL7`~0ueY<-3eXVQJU`Rq{VnAV~#muA`JV)R>%g{i@bFf9a81*J8Ad;Cecri)eEhg9DXK?0&;V2!r{kqmj6;O2g)Y&{0TVuJ zbY#7d8EQ!k>BcBL=bPe`+)^)Yn)$+PSGj+Ks5Z?86>BnZ&=z|G4GhqtaVdZefq&g* z1seAkWI#aK}UYaytc$`K*M)v=f>J z`pU|E&VAu{cEebM_$qRSXR05c>zoq7U-U`Ei_6XFn2Bp+>af;j!Izd=W>e_MwVG;o zUkX;28!CM0(*kqqGcd1p4+|~9`o?9EvcWGOgKqpJ|6GuXhh#5G<*S@~Wcn}c8qwdf z2bV82e}Lw$jw(*Fxb}$+3$x1v`G{GNm?X;QFcb|LqHA<4Xh~2j5pC4j+ZzgWUFK#rNp6DSkXWAKZK|gq7-z-L{Z`t|O1}QN)xT?dUuC|6|88R}f_TM0XEz99BfPyy zF^{ceZQqPmZiAZE 
zxqR^$7ez6K6wh&gTFpPnl~wm|i@b6nQZ{9$2d-WRX z`&Pk(3-Ez66A>Qs>=l72doix0m9iKJqZ&Iro_UFB8ikmc0jsUv?DLsGNno>417b#S zecXsLc{+HujK?hZH#yw@LB{$+ob9(x$p6hhP^QGO?i8MV-}ie~FIy$1;zo2E`4jME z{NyLVA{gv)`=<1PYwKNA$l5otk!$mN$0=jD%NiD`4NXFG!k&SG7g&$&=wIR=?AEKC zz=3|;*i=fX2J1N>Sxd1+5Y}UZULMUytd^Eau8Eepk>g}3Q2y99=_+BKB6fYW{Gp$K z4EQ(&YqfMZVHK7lVWc>vkWt212C}Fkt+jSDU%0rY>%fwtR%I{BYCqs{=@;KFN`bw6 ziS2xGaZBY4weQFvFL~2?7F|i)g&Dw7N52t+0<=NQu1+7sM+qcIpsSVI(hF`s|4iQ? zMJI^P6IpB{|Lp7Y6#N~_o$?r-B*WJY7PPC5>jy)|_o1(f@F0xX$MPK}CNt&6eP9nB zblMKBL$$yWQZJCI_gJ@tpDAKj;OcuFP^uyr8lZZS*>@gq%wv^B@q;b1G`m3e-H(M> zvW*3Tvx@X-S`zZK-RD8*i1O#CA0s4%OAQLKSbMrb#LSwt8&6nXsQLG7B-LZJ&($AG zxL>f_t+1*O(Hl}mI1?GNN$}aR$F}ZJ2Zq1G_T3`STm~kCGyRPNwx)+C_$Kp_6{T&k zyX))_gHKxP^8!0t6a#7ZFrN3J=16Kr*z2lSh5?QY)uf|~Hp)B0!d&a{uGU{i7zixA zrBRdEq+?E4UEw$K*p0~Rlh+=wmkcXyQbhycC=sHfiW%C{IgU39tH@&jpET-ES1^u> zXSqAq;=H0QCYWiJ7%S$1LBgJmM!*L5ne?sJ*KA;AwOLAIeZ*9Dqz^3w_Sv#BT1$85 zEStRM>JqID$I)3-6C=t{sX=cVlmLqIm6?&?j3i6aXZL!;oi^Gt%-r2WCeky}rY&w) z>w~B_oSi>gkYFBKr%DPHP*7t?h$ca;65E^+60!>|3|;10!H%PSqnEC|-3hM4j;5kj z>wb(~o)a#^)cqaCA)8VGqbhBt)GLB|d{N8VZ*6vsPCfo5S)vn4KdkLg$9$pt(MI0io2#96l|E=rB7~J*~z_p>V zklE^q!5+OIlj$~8*3SGx#arM)2^q5tY*L`Gu?`8}Y3*R$zL-VI?JOv2=xvzwUaUMl zapx&(D{F#2S_^Jpu~}%T513TW0zZ-6hpsD=i`=(0axEz+uCov^DHksFw>RW5P}{RD zEsF65ZKQ#UnJ5cT82j_}G3U~zVmC`f)grJ{MR}c53z3~SdjJ6LK}*xrkVeB7wzPr40RTQ7)dgbLLyjB`Gza)ep)P!yIfoyZslU8oFZ{jo`@_MZ=CRD5Q;2Z9X&H zmWq6z6AYNZ7`(#R0rrJo^ExD^h2iFQiRW(?>Me?r5YB!*O;n_Qq~m^8nngVnkXag; zl;kGnwD!p-+C@fMaMGEb1TCjzP&mXI2pAHMun~GT#WPZT>KoNfU&ib2WBw?G4R7ke zUsx~Ebz|(zihlaC`#m=vDk8w&;`F`-FqV9p+?8?rB$7K8^MVY*AP-p4BjygnES3gf zue8?q^lvRMTE2CYJ8UG=CJh-H!9Kp!)h)DTa_+;o)L@pT1}LsmOymr;)ZZVH0*+q~ z&5}@&cF$8{vN5SOYBy&z0?B3kQERe!ihamo(~xo<&TWE#Q{5mZnd`&;udEz0ggc@f zPz2jOYV|H&^*ODh#z8heB6@x1=0Sb^Sh@^gynaqO!-f){viI|~%nglekNPv$=BFPe zLen=g*1C4Jzv_FOkGM^5wta58po!c|GlRqJQ3b_-ER%u^xEca)2nuDMamGh4u4C1d zcg6zFPvZsiB|93+F`%P4*B)QpT_&Y<+_@;{5XefACY?YW?$LNj1AQ&!My0N4>+MwS zqE^6%eWn!8gc7zQHX7H-q}uB-$MGc*bM?K9SgKonoGA1(2!gEuih+hY@vRVjl@*{* 
zt_NS6*5D0XT5ufEhIwsVaJ&^@qvFCBH>R-cWr*=KkbPm+$YdWQnMNZK)tT2U8opLX zdCoq)TAV|pgkWG+)dC;U(ae>ANV+QaOct05uV0%F zm`Nd%0>$tlyr}4Fgmf&xS$T(#ibZ=j`hCMtfv>`Szo^ZItTZoWJr!GZc`Z|lrgHHm zL-cSE%jko2miU}FbnzGi58@Yw0up+Lb+D%o+zv9Yy~k=Lmgc+sLkJJLM4|;73#J=7 zJ)YY4v`l5(DEM&U`uH-u>hlOy%MJU!M9@j4N0iuyvH`A%-+A6r_|{^Dza;PoK1jH~ zQGvCdr%o;gjC}S5Yv?ki>GhL`JI5ngMuzUmUP+&cn7AC*&e?WXVtgYmEK4}`rFq$f zaN;4!yG%Rhh{|sjXnBjR=aQ06-ThoVkr#~(_MCRhEp;p6Zz}4+8Ej}I6N78+(vjTt z%*<^tr9MuSbCjBO3;q(?D%l?!&xoFmwC;Anr>Ra&r6ud14Ncg-c^0%Rp0wuq)_>4p z-$l*mC@5<%aBK$q*jrE1Mc+*q*YFsQE%5tt^ z#5=M&($3^sJejso11=BPM3oXq(W<|WiaXIrs{|(0#ghQh&{^{c4r@jd$qEsbxy7pL zT)bSX28*WbZ=53m-~F#oSS)g55BKdiN$WSiXM}DX`5o@(Bo!Naz+}$_e~c)A;R(y?cF1ME>O`{_MiS|X4{zQ&@d5Wwz$D9a%M$NMs9DQa zP1okzhfm3YnOENKk{-Tq-(P$fJjXRAdp**DqP5!G-&pVSmUBW)v$eCo3DGYjiK~qS zQFJqNp%dn`t$5}8Evz$23a|9qz7L^snY1&ne^4n*HBKf>Sd8S9<2Bco*m1sw=_o?` zi{31e5OZUvSZl(S6?wEhUU6P-8f1@n8Lvs%!~GZXOafNk`G4k1_KM-Ufjv$S8Qg4j zQB&MM)_ejHsHu2Y6NHW}NP#((fuV$f>ZXpu3?K(65uKCNnO&22O+O6SW5gRYUMk=} zp(Wu#RZH^^<=*DNzL(*#rU*SW(8bUw9$d$Z>nrpjswF8h?U1ax$mCN_;_LON{z4$^ z!mC5K6sv79+i)yCU40oUevcu#Q=7lJ$?`nF*!9D@Zzf;FRTkB2TcCSXJj{+u0zKQ{ zfSrC4-)D`q0@5|7jPpi*fVa8`L?=r=nw3{=GGD+c1 z=f<oPu zSAUlw+)SM?VmDGr7$Oz+q(dw?^VSC24RxVgWQrzM!{|T2QEgKrSgkmVR%urK9TgN8wCxBSF zQN%yDtSK&IF#%=#Y_0@_tKvLx0WwRrtcB`-z(1j&kJCFX;o_@bAqWa?`|v~ou;^G z>%ow@K$ZtTt~z^H7#H(;g^r2V&!1I45`UWH{UdAQo86T{YkD?2KYyaA7HkWzD|~Hr zgzJqI{4}kz9{G)oiZRDdAzn^QWUP(MfUhmx8wan9W*h6#?ov702uJSjg0NYhne7jG}BtTSLq@%b<({`$#l!rP7 z4{rTKm7h>$A{^I-J4g4?9rNxR$?U+YqH#Mz&sr+XHk!7MM#C8nz9R)hOoX(qhY_X7 zapnhdSiC%Dy}r~uBXBUEm;?dwB-k_xkemp4eX6D7ytU6T_r1(Z#x`@@xGd_K+P*W# zp@HNB-yQVpPj7;G_L`$3mDvN|ZOKdU6!vw$OUAFF;6@pJXs}iFVL3%goI$&5cdt zAG!GR^W2d!UQgLu3RyDomV`iL|7xjbzx4+|IU(u0Tyj!mG$%~~CM>G-Kdvuirae`- zZF^OiiRTwE3?}%ZGRk@_HbDV{zbkJb*7wRKWadF~IQvlb(89u!3{9ffnrgMuQsuR* zY7IVCjWiLI!OUDyU56{5`lyoh3gRyXA|z+EG(L!m;&snX5q2#6o8z0ODB}4jb~)U1Oz}6Yt-diJ$K+!^|HE z@738awc~m7P}jZ@^a>0ZOt-(Um=U=bC58LUK+!)+ce|*S@NGf^uRwmsO{2vXux)N8AAv+Mr#_JGD4cVQNukEe8xNuUQWjW+Nita{ 
z;a$6u1ZKvq5V!T#-N3N7$W<8vCbM!Oa&QafVLcl%^sT(1^>XUQ|SZcVwP2P6O1l?DMJB__k0Bid(u?b zT3nn%+!5KF_PrmO3eOqYq#;50fCHNi%Z@(c)v$ ztv&g9=URa|T~c9w{d;@U%+JQo0VHR2qRJ-a8`=xo#_u;ZdV^j-k2%w|d9SR_Kd)U# zlMEGkc6F|A)#vfHG*ebME-5qc+QZ7dNu)AB+5@c7D^AtuXmP&WIT`?pj{n>Msnw+o`i9sXw+rn5H6-x-9BzRZFO~-?XH)uDc^0muSOT%yyMzqv~Ms zv`x{Ru<7YOU1kDF=p+&8F?v*$qyt%L$2>YYkFvqUjR;JhK~9JmM^yUZT&!(kO}N)* z@iB_=Vh|UJF1M%nd~CQMgDQvwaHG<(?EhYWI}Srmlkx|cQ+hSE%|flhNu8!p*6_Oz znQ}Now3XLk|dE{)CN-5At_1LUdD)GLE!5YwgNV=O{jTZUSlM~ni_&Uk@K-))$ zw*N!JT!$UYrjgyCpi+HTH}j@CdlmjVP`|D@FZCOFWhHKP=nw(XAju*KP2(PkNe8Yc zy+mj<|7kJFZmr;XWJ^K>Z@dVp2CnUBT_&)tx{@6ig;4%B4c}l;jip7p1d<3Xt`p~M zugl!n@)^J3hL5eCJ71rdPw#frPi`mFKK7{GbrdNR8-5uWL8B|)l1EY>B@JT%+k($I{!Bzm|9|9u*xVj`v*2XCR zi*eeZ{lV3c(VayX@C8oCN(>afJ>~#<(2l70e>lE~d#z=LqEiLtE$R(YT#-^##5`A2 z_YKXY)M{IDTSdy5GOi>KAs*BHD6=x~*YEw>9l$T~r9j40%?5Ff9kortgyAQwu56kX z&=}@mUa9C*%*nckK`KNvtVmw6j#lBW$x_H%H*Ck`snC$%!bO3BsT~^;Jff}L5{6QF z#L%m3*qvp|%5vEZ%@OI=WWKOjI38IsV4ZvDz`l&i-BlP_WXLWepyxZK)h`;f#GZCwuotvR;Rj7+jKUKjp!o0b7UXjiZwalM2CmD?F4 zFa8z#);a7B3*3_4Xw<ma;{=&jo_71;6xX z)eehw?tXjD5>h0>e8K|;GIvRjiwnCRx9mx2(bCK6ks!Gp*XUi52Ex$glPgb#CH0U1 zjL?iM%@}v7vY61PSe{_`AvWY$Zd(x7r1pkUX{4MLw7r6Lz4Iwz;;2Y)3?akade||{0V?lhUci@Ph;PS zzkTc^8O1}Gp}U*;oJ-KqBCr_jPZzDv2iHKwTo#Zfou8!gS{I5w>8blp_V&kg zY?&9K!XOO_y|Ml3$k6m-$8v#=iSSSGVcAq(3I4E%awUD`0q!;v{K9ZX$t|1@&z-PO zF#F(_N$06An>#s?gon#r_$p=cQYije0^x?ELGq{<5a3JY;G%!JYWD%)x0v@ywWM~h zsTn4IZGF@gtP}7o&v)v4Z6m&(A;x3Wp@x)?O?fAdMk49Lxdkap-?+0GA*hk3VyK-Q zXn_J$lP9@Hrh7&pMvRhU%%owd4_tZ6^>}{uCxES6HV$`BP*vb;O;uU>v2GkEXe$pB zAH~0J1)bmwGAob#-9NDJHJQBs;>^6j0M7< z0Iij`Uu@TrQcfbq4&cCmZ;r2j0=g(yZaVV1_&&Xk`w>5bcp5tYtj#QVh)3fS;eTl}NSR*TVf6h~s`9n3Dr23=?CWOuAt zZsR<3QI}02^3K#4Itk!r%FX#Qd@rQFKxZHAqX^^Iq4$|9;zh?qkEi7-R6608SJ_4X zaNvUOr;x;jwM`@V~^EN3?(i>@^( zOgv9$3~H_ZT<7uPh3*3p;QK_Y8%r@FwO~2#&$K7IBjZ-lhOV4-aPv5|Ax9dY=WkxK zXRz$h=X0_1jOu?YrfcDiAt0#qzqJfEmHl(!3ss21r;c`Sse7(EGiNPX+bV-fP85$} z&sPt}zERJMBlv3vwZ30l7=L(ds)Bn-XoZuhrLXHB7Oq`|juggR*d@SfYx7I^ZAsF9 
zZm0y=&=|aZkWy(W;$>ceoR3fdFLZNx`LpkwVOdVAjCeKou|E5zNNn6>9;yckxpb0@XpW8qBMpSC`6}d_(Ed=H9)zfH0KC0g~;rHi< z^eUeyu6<0YlK%g#^N+@9E6y{{(j2x=^`0 zcGu<5*jcDq*|+7?iaAk}ukuA_0mCkgsn;R9Ct^z@DbOAwWB3f+ShtyiCX>0&rle$L z)9GMgaXh`M9x#!8QR%5(6{$LVgpxe^Xr40^TY(sWERb>n0GNm|0)N}>ZzejKDUwz( zr@ueHjhSbq-M#X;|`dY4R@C9XSDGv^}dXn*Wol8}} zf=Vq4*QIW}y3Y&%(0xQ#Qf#Vw@;y1FsUT{Z(my^)pkm}vosD}?K`Dd2Ews#^J_1_y zpIhDVADXSYhTtbatz+XB^!RQmyvC@yR^kWiq30#a%kM^*VXe;pHH8@ek)@)jUlq&# zz;bHp=C|m8th;haq2QOO9q4JcTS|N+psB99*%e#Fz|F`wTZcgLG04a_3#ISB9M^e} zfry|!aokSqC}%xiut+DR@|$2nKmTYaSVg8@Me3$x(J55isFAC$6o!-G-KT>p!3k<) z6;_68+Imk6(x&SO8xYR!^X5C=OAUfSU zuJb$7iU>b1;!ec|)XFPzD;ew-)^Em07b}ylRs~k9do72>KD^41kGk<#*PTrrdF-t+ ztRWmACP%_M#(Alj9YvoPOC#Z0s(><`mZjE*%ZXBd?kc#_3ps1&c3mGed)qei7}ORA zq@pR)xCB^Aa8YQ43D4>053*<}3W8Is!k}85LPl421Kw*SK1CuSn@;_m>eXOk?hcO< zs=iI_Ju=)Q?j{wIS$um6wc&L=l0|?kPzxLpBs;7d+v6dbxo6(uECucI^|-ef8`Gwyz?2$u*di%FY-)Sh=>t<3 z*mkE?;UHt@NoIX_0N^#D9uOa#KS+WC0QxS4dql$)X~I?szF-db2K`?fU9yjDU~PpnAaOp+wlz|ML3UZAkaol(uWCs zmXA*gUrMk@)x74cqE>uH#9; z;OytgBgU3_qY&4MU_~YU2!XU)vhbf+eeT@5pfSb1VYTp}bTY-%Xk5ud1!O=ZP`P>@ z8I0y2`}N{_17}GbyK70a8*(;{T8o$8WdZWVh(X* z)8j+J$Vbv&k^n=AFlfB3j73j#cs_XutMz(?!M%=wLeLoWIaVdj<%Q^U=P&?f9%1o= ziSL98Z@=8vM)VBH5V29v-cdiKuEsdzXQV-s^zb(1K!-&&J%TFf8+0N`0xcNNWb>MQ z&u3ybZCxEP&L|byH)j~S5%Xy$)ZJ?s`Ye+$30CbWi}J;iQ#x>R7=Sb9QFhrON{)A3 z!)Au>IV|_%vTLV?TH&@a&u}*chj;Ou;D#7;Ply`^o#_Ul7lVl9h#|i5&aNinXOybl zHd}Vls)^BgE>0hNC>@n5}=q7Sy`n$doB`quqmKqPOAs_p7t#T|J_YbT!Rg za}nkygEooJ1F&3b3@MP^+}71GWTl;j%sx4VoQs8)Xdtobt4L`u-Ufrt^5H{f4!a7f zd8Ut~(#nBEUp};=qB|ALW;jLfO=jIqeP(F5u`x4V)<|m1&DuElxImif9kzU=JlSDv zYm6zJ!lsc!Q;kbeJdpb}hH(+J&s+(hXs)X;(5Faez zld5&7`Se`Uk@Wm)XxSyCS?67^vZIp1!=*Z=THy`u_wF)BFUqTDvU$?W4O;zzhuiuYddW?|heKbVJ6q zX*3LK1rEXNfG^WKZ4cPTn~yV8+g*$>9rbu7jZUdSw;urLxYE>RC@5$+aVDF$RUXa= zw{JeArpzUKmyPe+YaXPt;tq0F(UYLhYerOiWb@WpxG3g|jy%8Fg`;*i3rr;o@}E1( zadDgAf!az1W@tE=2aaQvPq8LmQG+F<3BQK1+P3?mVk&Lm=U~SXG`N~exm|(M+t=1V zbe9(kRPNO=H*Qss1r1$f#BkHu3KGJ(-G?YBgm`nx%d4xZBJo-DRqz+C;>L`i`@29M 
z&mymNqk;~1ZH?N1u_n`aoiu~#Ot)NO32FUB3jDknwL&m}cpc{MhXENxNU+~@@Nj8d zl_z)%_(d96Aiz|Ss-jS5MpW$L`W+etl&}%BcvY$PW=Dg2QNRTW*5s{{Nk9y*Lhrs%Xy z3(*hnX7R{ER2D5eyH9FpEA|Tn3?}+el;5L9sHe3j{3g9q6( zb=tmCHV$B3);VmS+wL*Y7wS@0ai7%_)8HZ2JjKCfsid;RY)?VT&G@uYOjvK_*VzgB zdPFLBWnWQ+D0rBYS(E1@>SEJzaFhUj_gTp3TE5@$>O~mm-b-wEfCP8GUp$YcG-hWP zg(kF6Tf|7aZ4_=gNS<5VqlMJMQLs6*jS6#el<;l1pKO@Va3Q?DaCF$1EI*ft-+K`k zEY35kiSJ`{mPga%LVrqvk;qG<>I98M|HPofY8xKx>N*^N_10wU;vQVev#>eW|NWp1ts$dtWi)|UJ(Wjn8gSU2or zlP?eMlmUzRbA3+MO%#-OD1Bh=@Gw6Tz2`h{d5=x0BM;r&tkhK;M)pPthgICDE1!Ew zml@`e$MR!(=_&PD&1wx`F75$P#BNA9*IhWE69TuJ-+ebZx<$p`K=CaVSEaFy?~8q9 z98rNs$4_4;aPqCOH^fI6hXjRl?_ByyY57nO^zJ8BO>?3!plh&ICdJ?tNJ;lA3Pm~> zoRJkVu5nh;XcoohtK|X;If-@AECFJ(s8H+XW5f3YTp3@U=6o4E>!o~&{o>)~H@mlF zO}A!x5d&q$y}$h<Wr#pVu(>w|`Xpb_J+)L)E`= z$;Yw}&|DR};2uG3BqUm76a}xJ?j>!sYsral7Y*Vn{9t9K(6vpNo=F-r^`oJyXQvG1xomI3ch!nbd=Cd5kKiWwc) zvs)sMGq>VX;~ml*#h-wJkZ*f9;SJYV`(&xYl!{YExMy{$8iS^U#vQ9fvg(kN$L|0Y z)z)sd9BY*xhLBt5`9vloo_}K-{|7Ah-|LbFPv%GA-|h}Wm0s7KrsR0ttSF5YehlyR zpXS0I-srOVw4&{w{E~Pvy6|tS(+wbx*zLNW9w#+cyxM?>@8xnciZkpLW50f)M*<+% z1%@l5fpcWRDbA{Uuio$%Ah8yX=c8v)zpxe*0=WG_j=xY8iEBVP zB#HtIi28-Bus=hhEhIct?fHJ8E$T%3aU6f4EhhJcC=oc{$`!DZ*PmZJ$#WikH)N73(Ogf}7a2aaOUaV+i&5=W7W z&OO0p=ggDa1(9Jf{{w3=uuJsU!qJW06{6bck z${^7eQyXb(xWCXAeA+?^roYe@B2rpJNVEm?9f}Df>;n>6!S=-J7qX(_q3`!UkQEhp z`InOBpU*bB{(`I+TSek1ND*nHMt|Tagv=-+uBLwBC}^5&LyEJ3!^;i$mU;I3r9!} z6B28|(}(&CS@F;8ITM`Gh(u9vbg-q?{ehyeZsEh&hy8(~*b%WG{EkFXsGbL|fX|(sOL4|9C!4Yj4hU z7g@KkTdasPe||TMq4pag;eUSNR5|rG)rXIAd}Gf2dzbFP9;m?YCLDeMZ&);Jch~e53tu#*Nc?bSg;C(!)>H{PU$+i;` z;rtUxiQr0MuY!nyedRaGP>&9V+9Q3SMkTdBSCJ^;l%n1_Q+yZ0_?(&Jmgx6ODZ>jw z8dux`5{)9k#OHLw^_~oYk!)S2| zHN|H7e9gyZUqgZuoQ?d(&c7`jez+%X$d$hD!)6kU(i~j89t#T&NEerPthMNL+Lx`z zVDq{eP-W3F7gZ%yHYPqCQ8=*O+Ldbgx>R zbwHU+DgZ!YV{A!`T4Z2z|k_u%eYKyZhU6b=PNDBOd)J0TEUgHt#O z?(P~0?!n#N6EuOy`PJ#ZqjUO>b6)owqu(2Mym#yGHEQp<=3H~Fz4x4-Zv|Fs`6rfD zm*>rFM5Gfqx^fzSKrj&wEd`=E<37rpmLY%0R?Vi$n*(%&jEp$n*$ z=+zRdgNPNeiBS&ta9}H|*U09^Vo@hG2Ycr9ne`datTu&5qS96skL&Pvv%fQ@D=$gg 
zfX@)vXj^^Asi>B+Kj&l^$qr$G!FxD2htAU#v1E|Z-OX?nt#%BjE&G!}O^17%JJ*Xq zWbFvDaQY`Scp3znanK%_$b`J&He9!-QbV0@tL%$+;ij%oXew?D8}2r3HVoY)N;Hmu z@wEC}`a*0doS6ig2gYD(FIs1P``po1n&bpmrykBzu1Q2-+`@HC@VICiZ@(h4O{Pw2 zC&Af_C`l$bsmLW+CumTmz1$)chZXlI8{CUTQn62)bi5NKR?I(h`7vM9>A3u|K2DGP z(I5lLqaHDzLc;7G=BGQ4Od{=o#dZUSS}ApT1x{-*&)F(sR~9i+*;slf#?H8we6Q9C#KJwMt_=*Mt0@k9c!cCn=M7G8?4C?Ga0X4M(FyWe>I@8G3Qcf>h9_gtcyzEG*~!%Ts0w(vqLI zPaWB*mv(r)wE9ph;1T*H`IyCFSxG2wOoH)5kq9MkrIk@0W~ow_w3FV;vV(p|C51IO zIkKXib?5FOoKx%s|8&a((UNf4%y3#6O8jC!yJDxbrs49KoR1S7RbG-Ey;MYuR3{sw zO$_UOt{a@5fad_OT^qB0Lq@QrpfG$LyP$g4&ZDkik^CoybZ9qS=5-wXyHNW0NAdSR z-*@=yPaBo6XfBLiZTuv;5%7*Ik$?B|3t1RvY87??H)MPnHqLz)rTnz@3R$Q8JsA%< zP=6F88%PyAlA+Fux?L!x*2y1la%5CcG4Z`1C9TI!Q2IWuu6l%1Ro->9RGFk85LWv* zNs*T*y`1{AT^5{gq=HGHIwh7&FmK3uko)$_bFYPN1XXMaBqr4M+Tu4ryZ8Y-mWqW& zfOf^0R;uBE2gVdWnZ=={Zo~w@MD!cT;=q82-xSzhv+ERN<=oK{6xuT)uBffD7xvQ2 zM2$W66j4FB6q1KN&mSm$jw{3m_L(zSN?h1WU6{D#NNm13knwZrH0_39nzK53>(?;4m8d-m2yB7%AU#~ zfEN&WT zo2=^g5SwUSX@$ZiDBz>TEaCjVa!)ueNdrX-(D%~kYZu|cqz*0 zsY*GqLG&mEBM5z|3@GD@MN@FBbx*rwU5Gt8p-ZpHZX3Q^JG%!ImR1&C%x6BjGyr=MB4A!_@U^dvwppO;uQJPeRh5z~NvJ&&^#XfGW2)1+dqS9$^o6crAxt-zlNB@FD-y}L z*4Emi5z5we@rk2G+flefB+ZDUrC;ZK*}MI1Y&+xHd9;9eBb>@i9+RwJH+)UH*}pt% zhTja0Fna)B+b1%7Tx-Bsv~BF;y&V-P-=M7*F7ur<;PNU_r&=8vf*VM_t$is z-vF-Q_qM_n8LMs^N@dGZT|Vo4OlAIZeW8TnP?<$soVHnH8^?SJXi@JN1GqaQD@ZDA zR0q+dRXy2N#~kAckc`cy0Vp9Vfw0ik6qZOHUda9&u)cWq^!V8-m*ie19P6gb&Qqku zpXlX{v*^JZULn*Y?uW->y1TJ=!`H6{lI;tAYhg&eR)nZtWbD}=ixHfB(*C50Ss;Rw zFN?|=@|`n>?~#p8uFK!>g3>{+1w@cSuv4l{h)9fb{LGX{PXK;YqT+z$Is}M6H@=n= zdFHlkZlmi4dOF>?o8=;&C^RE+J8Nt@;k)s1ciZ9a6b!Y6rL|$9hK@61opEc3y;C8v z@!*PL$tp%ZbK8ou>T%FHuLWw z8Yw!H-=82k|J4NzyT@eGpsOQftV>Q0%$d#>|nR zEb{z3RQmNkR4hhx^?|N%4IabkVf~Z5$6+l9-gO>N>22FO1PK@HpC6R$;`%6YIY2*M zOD0f2rdkb`WK?*~A%^-jCoVPb@R&kI!On2-C*Qj*8c4n&L;RA*xJSQFCb=vQ&{^%D zU#@6bX1P)CC&0Vt9HG5K!;_L)_=mM_0&#drHZ(HsHV0JNhTxvJ*I1z(76iN(G+08b z)oN}0FNn-vWgt5@#h7}?a4|@-&9`CRn60Ihxy+Ep5-W|s)Ks>5TZ4k=WdVD)Ux@J% 
zi`4WaL^rkTy>18ai{4L8CbZ0^ySmd-d>>b@$dYl1+s+%c!u@K7jvm9D@AtekMHquh z&+jVneSSTB#BPPOB14<>Rj3U5V|~4ALi^F{LN%B6C`BPqqjj&;2uh)D2~m3ZoCYXm z4qZ{boa*K6n75ZzR~0GtN@8kLA{Xu2Ne$)p*fFu80>XI^vo@7T>Im5L(I`zI`z#L* zO1RkVS3DUm6=MpniS1X?5UC-fPX+pQ5RzVQ{H68WV$5pqX#6OktfwNuoT@pUixc4# znI}g`laoXL2G5h+>2a1m-<4#rq8UlUp00#OuK7B1ps?0XZy@=YOFw~HArVGdgX_l* zsLNITtEMIo3r;kK?4l}MbVqj~)%o7jDj~soL*_TK2-zAN5swj#Vri`6Lx6fDh7G+( z6=&)NPFIq3r6nducBgsdc(VCWbC$CcWR-oa)?0{HYnD@|ZxCeTIi=Y#DUKTAbaUddxE7K?MSjC%@iHPz8IZL8yGb^_L-m#hppk359;;}}|89v$H-C$`D$ z4g;IP6qOeeK2LriZcTM1&P#Xq9FUW z*=4DZV|F>#`DUh_&;6kLwbrg~mTnzkFt_*u%|BzC9Ad2^h@rU}9lS2Lx5y*lg#~8-1%sjGAn6oMT8`D#U-#VF}M*|fW(b*ATz`U50#DDD>pP~I6s;h6MsNA8t{lbpBa<@u}51{=HUuT zGM4|=yta11sOGltyy9Xs*aizT9a1mJFrZ}$(6|~fdZxQC{81C!qkmH!(993NXNu?5 z>bB#=pV=DL?j>}xI8gUw4$(}Am9y-Ua*_n#pmUP`#R&LhNOfai3C4JYlXhcVZZcCQJs9zLL_Rpn+jyBCZN2A81Le=jLy8 z!(2udF#N2-DpyP8!pLoqXcYK#UF5iCTF))W&$ArI6pBW=h`FhXoNMq7D@$p@w>HJc z#AwZ!_2;^<O+7A{@*CJc#E{F}{65m~dX$h{z{)tFj||-V zKG2>iKuKJd#E<+^F3gMu#__)0g3lYhoCS!5+mI7$GOfY9iH#hDZ=G6nna+&=6%5ata=JW!?r47l;^P}XbC5?6AjX_ij1QXPbVVi zup6o=wcC~w**-#Ej*e@YM8F=0geImSgAoS2c@I}|GY<78!4&OS!WqHKzeu7N8PNf$ zi1HE#ORSPpS7Ro4V~`7hB2y$J0CwlO|AZ}M5;12_(^$~I^p=J}GbTAYV`Q?VQl0o} z6t#=)Wh-dVDPP6Dc{-Y`$?+TD#99BcM(=)8$ulpY+QoBuMiQH?JEr8w{i$DZcS*xh zS=FyH9dyAZx_ivu0H5IVKU?n#=gFeNMpN6b>q5eue3|FltSzWIEt&8zs@LbT(lD}f zD|SGX#dSi+`1#1Cwgz}lVHZd@-{jH9?XSOmlH26s5l)WHvK?vK@gMkDSl#UHCkn9@ zBDoSFu6jlX`T~6EUx0idxVulg#q+$;BKmrV zA4w+MB)k+I^FGz;YAC%}2`4XCpSNN?%S5e$7`}7maGl{(Hc&NchQ~limW{`{JS@ps zUBw;0l-Iq{95toELIO}mqMJl&w#29S1$I514!Wjvy^41sD_P-vYOc>6cg)hxfBBGc z{Dby4z&ahW>?ZDR^XHjb`7pQr&XuFKR~2m{PDY?|Mi_W&UrVJ9m4N|OvILJj1tm+N zxm%&1l(6~O5nk}3d?`Lyty(kvrS-lKu#p;hS|>X=@P&pCd-X>hqb_*IFH5su6|A8i z-@SR1KC9!AhSC{O@cif}C{sr_f=;55N_QyC2vn@vE7dKM=n+!5eu?612|Y#8^oO96AbN@ zJ|iHEOtGN;W$iBqRK64_q(3DWRB4yrWl@3(XbzDqUA8FIkn%af5(MCH&oJ2x$E&qK zaIMiP&4Yc-EpsO|BILMhJMnbS4=uboVRj$e+IE8Xw53i=k{c9_)N5uu)aVm(%ekp( zguGG)kv@UW7aJgnm;Z3I>OaEMZl$BJ#X=cT5vMNrGkz0Jq+z#{MLNcrvSpSHXXAqO 
zt9vsa@7OePw~NTel)B|cB0kY`@v=)GH`EFa#`cllaZDTcJ5w<>B2*Nj=l50*rI%oY z(NEUpQu$Vq%%sT!6QgP&Fz#T0Q1@T>JoS5&&ky-`)8FdzUvt?*@FwD(SP-g4Cyh~z zj!L}h7+BEk0kFX=Cq0RU!$5WjET|gb41l%x;_K+l$T(qiCnj%$U<;tdT>L>&$Qx7P zs>rV1q&iqS(s{kna~7&#l=f%-E^ikoB(9&MaXn|bi5(bXn9x8befpu<+(^0`iau#< zaK0`S@9_Yg9hg-biL?wnd7L*{y{+_g9;H_e1e8Ut&#V4yNHdV^WWVs)PN&jRBZ^ag zcvxJv99xwkw0c@)K*XKO`#v>aHTq%ZH zdC?GQ{|ba)W(}Il{K#UX~Xo8jUEAaHw?R#%p znXU89cs-A{*Bj^++hp(e*u^>unxvLB=4Jrs^XQHr%Vd>LkE?gmk(j1)7*G{^6*D|y z2c7COtk`|f=!Q}9e(q2f9p4J_{1or;Wd%b+meGj&4^#?@Ryggjc8^#I5_aK zMmv6d7-OF>k&^`MZ=gG-nHr;jr5NALb zJ_Xj-y9sxbs+f4RL`QD|>o9dm7Q76(j9KFGbn0T{@InII9>?|e(;7$1UzhwtV9+Ue zq_p(vetJl%`GVRG`>Q>oVV>-h)r3&d#}y{j_agNx?s(3)4FznphOw`ANNA~Js$GW+ z8rM78cH`AN>0KqTsFaH*58oKut)zzAc?>t)aW<=rVg!MQR0e%BrN z_Ut)19bd^mmW`03&TFUY3Aho9D!R^w+HLjFg4Nu%*WrBVHN)Euyp zx(MB&m^5|&_6ug}R>K)|Dft7J^3hUDQr`Tsk$V|(K!?X8&35z7KJx9OX7O4lA_<)& zo@7{ORv{k8GlRI##r$@}s!8QG=FFzVL$-+SJOCmM{=@7)?8X%p?U1J7wD`H^chfh; z2riTGqd|O>cvX&77~=rz0I7*DI>d#@s{OQy!MpTnzMYtXZ2&!cV5TEG-VXcY`I?_bY|M<$}5fm!Lo&&EH^c#}pXuahe)SFK_SFJPJ)@;mU7Kiiw;>|(w&Sez=q zYm~qKZ$#`h+b}RdY+_iwP8>Sw-z~Q&gx0Z}3;OwLlmi8swVw=VSC6hg!B;Rjq>5 zy%;~C$rkGnEWBvi4~C9OiY?JGB_B*iSZ^>a^7<;IKw$d*PFtjf%m zL4!qPSk&Q+3ZTnLG(NAppL#FIBuZ`Dy0?dCrM$r1z*!prG22pilm@vz z20l2xQ5aOrefAMX@PLFMOyyJxGUt;!efBxf0yDMl(a0i4CeY2gnZM_d2mF+dc{C-6 z(hH|M6gKjN$==US@+W*}^iKG4U|Jz2AFBS_lxFJWj zq)DLLiHC`onC+(|7jqV3C7TuQ2*U(21X2Jv8KH`f*rj+ug+AFibpk#ZSysZ7`utBZ z;u7PSEr*y#_fNTc)F}(Zt$yB2P@{!jUnqPI2uTh4z8^k3CpSHo`yfm8f3UT0?-7KwEU-BgF6l_ z0ip|txyaRSDH_HRc(S$-9~@#82H~r$sxYeCB*}7z0f4KeCyb2Z^5oHj(H%vfe41y@ zir7LA6F6-Tj|KHKm}TE|3v%?VRbUdShz_U_-^wg3Hg`Y&H=<0a_Fvlpq^ol=emX>k z6EX*UCS@!yC$U__Cwk}lhLtr-sTPYKGYA7^1r>y5z`1kii7NtnPt{4$7Okc@ zBjyc5+R;V6StXBulL&tOHlOJRm3EU%Lf1w~mUn&Ya=biH!F5CS%Q?loynQ;p>-!;8G)eX>695D`8ICZz)R z3B92*ARx<};4%sH=o--^7nsYqOoz%hvTTfW(T0dsIWE0oa>G#U;+%%5`A8{K){N+HJA^H<~YT3Y_S;;h)>b&Sz#JT}NRS|l{ z?fdhu64y49h<%jUJX_2<`^hC?gRxe_b_NylsPR=YaMDcn*^Vj6 zvV5D^F+c$X=C(R$NuhpVPzh(yWQf)Pq$9gT#$P>4zF>RvUTe72;G-z 
zUj7et2iVMGxb8%vE`-sT(KclTYvY7I!i;3d+Rir3`6KM9$4} z$2xlARt}*Gj#hH!lkJfPg256T8Vk17ZJsZt&p%9eZpJP=eezntdDa+j(tYyFH=%=X z&tg%pY-F&kH?tiV8gd zJ}n|xKc#xn_n%wr=QWPf%c*nFcaHoAuJ|fyu_l|5o}yfdDrJSxAp*We1_))ktO7#u z*`88;wAsq3RAL1ps+HM1J-nqopX`)FGvTu7t=BPAc95iam70Fw+OLW*V!^ z(=@Fgv}-g}6JOWOcZY7geh6)y9X|Q=y204F;cMf0E+dYZfXUHhO^5_0QtDFkn%?D} z#1IEjo(_maTbXziLrf8jTxTL0ctTIl$fPe!{iaQDVE0=*S?!>0k<)syl>i8(Vq0%Y zP_0~uzKx`Hk5QgMHk2`pT}BNsC5z_upWC0&Nw(IU!+KTtZeDe$rEguoPiVEU6me-e zTm49;6Y(2>_W@DrskUg8SNKry<6qic<@fJ|7Vvi^?SEr0u{ZtK3m!=d67lb>dDy?> z*}!!5|B*%_51#oU)C9+*xfg#|6C6-W^m04-T}|+K^r#!5CP=^eB5@=Hp(a=}<^S4v zR_(b)^942qyv>E9G1=ErmveR;O>&2Y!*F8sn<5^mUCX|Y)%W;=`Jn%sXz+jPZAIt< z)o$LzKlq_81e8mhbyJalQmfV%MoQ6?4d%SyT#pYmYD9kmp*uYYo{WP zRyovZQTcU^cI7-hjqF;ldzHyvo85e-{?a!4Yut&zouTpoQAX2t0BRnj;wPbQ$2YQ) zz{9S|cbJcje#Ff7xNjV){756blo7D+{f;dS*ZIy|=Y7ZX9Qe^M=jpHE8Q-{BcEYU| z!@iP+anyWOGW&F9{1*A`7x7oWhX3taI1@G4pSiHKUVKg(*j(y*f-ZUC@rU8oXRT?` z6j{b(Tl&Q1-M?=1_z%NBF1zv&3kY}qU)Xj0u71ZDWaa4dBFIL){5L>oD)-NU<1MjI z?j?Vihi8`kooeGNpSzK!inI`ILjSP%`LFTC0b@}c-@r|Q`Fydz70+F8K>pU^JP;aHx;NCL`AOlG&D{X!`wRBua zjLtZzQD>>Z?J_D23S%DST4Ybru!UG?f+ETDftFqS*pY=9H8k{-pWNnWqYA3Y_Nn402<0uf$HX<)#R|o$}%W#7DQ7@0GZ!EFqn(6LBIA$gE z7P&7nLbOzZRe>aQ!B3{jqdD^f$Sb<#uuty70`x4EO}`7ST@iQ(8Gu3T5{!2a?cBup zcg;>-Jr%V-nK5!7je%9z)xNZ;FPY+EKIt^tvYI_Xf?|YaaW{na`&Szz)|O8_i*jQH zUM=+1;CH#5u<^HwD`#E5eWp_=E@72HHU90rO+Sxed(?O}Uedw$!}$4Qytw7!c8>`3Oy{3?+=zo@=0aDhW;0zM z^dkeA4@o0ZaB@A5)mZ+v)FXSVLkFymM?2tX!1v$(>X9FuD;$3+d2C<<&GBh2^Hii> z{i&#m!peRd6;>7U*EB{GIxYwCOYBANe0R=}!nSU)-P@Nk1+x0gRqBmR? 
zo49?V4b8j)UiwAd%mx1v2Sm>NZ+b;<{<$XSK0+~I(f)_}eZ!4?{8{shS-K?RBF;A_ z#MRLLHo`0U(M~3+C;FQwl_Y&^7*prdnzJ0(hrv8OgUBxV zD{+?Nj2bD}m5o}PFNHtw2{fC2*6MwicJII{BxYaVha#gZ+GEUc zhmp`$eygQ2#6zD9;r(DIoM$(w?nLT3&STXCJ1^%qgz z>TT6Soo=_&_M@R^l@B;YXBrcEptC6!BlLVVd;#K&($X07lmo(;&~dZRJtzl6HWavr zpKu&Uk|q)Y+pR9pXIYw;nYU#NM5ia#uRTJmdNib3kI)E zP0Hs9HAO_F3M~#EH15+LZj~vY^ zUtd(PereZo+^{=;eJ&x}AtD)82dQwg2)jB2>E6jz`PMiqs18ASSR1SyQ(b=p;8~d^ zPLs^4g^6NC@KkCLL&|PJ=@@1`SEN$I+T$A#bqQu}%-tnjF*Lpii{U=z`ZYDRINT2# z^mR$>*uBIzVnfbWVc5&-MYa&37H22HKEmWq`HA+DhV?Mlo}&{TJ`4M}-PS`LW5|tw zHt(|Aa!cmw_w`n0kI@cvKyJ3u5Xl=$oZLr5))<1zH8n5BtzC>r_hwlNJ_(4&<&1e7 zy+7DIm>3Y8A>&t-QxheM!zLafk(yVf!&q?uPTdxVU-4fxe|M?bwDyH^S?{6iPV;WG=`8MGgORVweL)0VG2UeRGljxYienk zQ$@R;y^dSOCE!uM+>%s==nE6RNgrNt;*4p_-X~+?aeZ#!xaih^CTB|;Gju-flI7+C z_VFVX1yp+CQ35{UO;?%LoPNE2AB!Cz!JxntTTTWRu$;Wl1>d~G!2(@7iO^}Uwu(Cf zZ@upp65B_W8W)`;Q);YVcTVc04@{RVM>t)~=Ghwhn-;L~iNr1O4nq{${X8s^JZ71x zD6GXH9H!zdy3veC{#g;~TH&5+Shyk>J~j=Jezs{UbxLL0vwJ?%3Y8j4g~VrN#+Y3b&Y^xS5{V7VxKgy%#b`1 z&se@jLAQ-C?HLP+T}i<{$v2tXKc9u+iJ)uOiqFjx^>3=TJuJSj=c}fAX{>&$ck^lk z(#AzJKoO%(pGDdS;u=TiFsUI99$>s6I_E(R^6Irji>uk$+yL+G#Su78=N1#fV zRPEy>l^;aFR1Rh+0iVbPE{57FqK?`&qF4k|M1kcaad+EC&NKD$=c$N75c67g!!!F; zmYZ3JfK`SLH3N&E5V-jAv7D?T_{%k(J6cP_2+LO z%qkB4yR*0U0+^)Xd~L?Ua231@Idp>D4Q7QK>XeJove0Jb#$#vF790{}2WT;0Xz5)r#?BoB94e~E9${Eq7?l$wWGfV7+f}R(%^gSNo zRY)9MeNeBB*pKpTO;&Zjyvn;&3rBlT6TD_-R~A}?k1qt=AkA7%(^$+WOl|+YVw^eD zy`Jp%it)-gYx&CA|DPn zt1UDq#^GdbNWcYqwR0ns=<-ab{@B!WZDf(fSP(Xe0%eSvG`%B6hEXEVz(>>VMPGbD z{hVSEw#~pmOW05fbYp7g?V zd<_~}k_X#Rn;J)9fomV|bf>i3OB2M*Zlk1_nqksGL`;QW!+42R(AXPu#RK4@?!F}~ zVDm3(RkX&Z>lW`B9ZUA$rYL%mg|w`hLA_c`NnGyi)pMFpr+|iGVmM~!!UmloZ-aZo z%qmwSDO&XSur=fNk|KTXb&HaPOFWW^%I8Jc(+{Q!3GJ+oqasnoBI!MGIo}H8lz0mJ zhUCU%a^FzL-c=|N0XFDq#7Gz|=2cFJ_1yjUwu5Dn3e}@DX$HjZs5?4Odi9c8tD#gM zt_$@y)GD)uTn5EFEO7F(d`E+=dOa2Za}cXTMS$wl>P!I8OTBG~XWt>&@2;;~Vs3Uc*}9dv1AF2 zrrpNJH&B?@EM-}rXk6XYOf zn&p!O8PLKo#Qp>7i9!A*iLnT$H{-HgAEIio*Gdh@f>IPQsK*BvM`znQYFo|XNtXCay|GF8)i`??4iAmgQ8w&qjReIWGM?uP6w8A 
z+r$lA_wxOSLyvt6Djk%VlC>U#H9s;flnSdZg-`a7W}|kB49b~cdabSNY`VIbxlC-8 zv)Nky@)nk+(7@<*dRLWMo*=xGKn;T`$b$#mIMP+6y&|e%4WG~F6VT~XXFHvB$QTK| z9z&iJm>)sxdQn_ketq4b{a^hB6aArVw?Z=#pGI#M%KgI$x( z5D?4)5V5vOLEC}%ueDb;?BT((-#Ug~hIJ*VY{Pc~g5M?yklLR~1+`?D6_%zb6>s4p zzI3Bo4{4@xwG~x;(Oo6$S!HY3<2^4F!)z34TwZG+)nM^S;J33(RG3pCa=Ka5f@FXn zl}6eTqtDst(C@y=u^4xoZLOJi0v&y=$RaS{hn;kJWzv@`i@B8v5@7)1W+MhP&w874 z;zwOyWw9_h?P*_SSYixb6NoUSia0jkY9zMGJs$sAQF=2pPaMh68f=b%feO3{hF`}g zgk{)q`IDX}^pnqBu^W?#U|r}RuDT?udK`tan}3zeTkQkx{4|MbywjN;{U?e%5*=ui zLZ!=LxW3E27^3o17ItT+fv%G>6C!seH%B z6te;b63i|F%(N;ZL?i}BGuV=_+Zj|7%~O!liwv>o(RuA)Q}#T6z>t4cK9t_9au!Za z2p?iySh{fD@GqJzy71ERk=9sAdT!pg_LuB;&x!xymQVj35^tJkt6!xueqNPc7)v)P zmR>x<9}^&Y()|d0D;l!mFSz!Mwy8RP;OityRysnJ!$H9A@+o#GRlf*SD0vNS$t@)i zje83?w>crY_I(&B#T}29lVamK!XhlN6c3NC}|`@L~}F@xuimgR7L+ z{7sdfhb=N$fW@}IP8_$w8U>jcVuDIUPc8(+r`?z$Kxpg!;@$s|0Po48G?RfaSHAh^ zWRY|92d-ncxt?&+M??MQSIqqN);p3Q%4gMY($F(r{+CnDSMSz4YzIm( z?ZHDb3XH7$j!Y}X>qr=J`Sj8NVj3+*vHkQ0^5P100kosVM9qf@!|Sq((Rb246o~S$|p5UL|Tdi z2E+)kvc?5IU+4f01QmaYrr<2&FD7{`uN|sY>iiS*?ULc9MWJ2WpyOR$xnF!GJNxJM zJ%>ni4a==#=0-a<4{D~A;{Z+N?m00Gv%8045Ta|C*fjo3v}|^rz)J;L3@+O1OL|#a ze6(hXliyyh9yH`TZl2Y$z!$0jBlxS(Dp0?HvnL)H7Z}|5+r#$*A_@i_ne}<-=;z6} z42vyvq|AV`S^{X{4AGL|pg=@xj!EF0IuWiJw#_Ha7CvYWu`g>mpO2LqRE>-)Wi~Q8 zSQ_nSqOm=}6F4P=H=2C=E3#nUEDcvZp+*U?JC=m37;#fkg+cvKUMMj4p~hEeiqgEY zO1-ZVWmQmV_C;-Tfp3g3XJDFYh>Vgp^Mj5}=NV$TQ4rDAb)OFk_m1YyXNmc=F%??n z)R+M90=_Ao*gAFD9z<`w_M?vZWa|aK5^D@?2#}6&um*Vth2?qvWoYoO(WzoB; z`W>C~kipwO7bAciIVCH}$(XHvE$Hr#b(}um7js;1d!S@%?VuQuXTfHQJCA@3QGMr_`gTV_0AQPV zAOAU+U1ol^7J2Wlq7vbfcfS^aMM@C^I21%&gbM%&0v;uGH~ocBM)}F#^&npST^M0d zb*VY=;5*ez-t6hCnMg;QbeFfB{X4B}*5zei$DBl84(_~z(e%|6et)4Vr6le8m4<%W zs?(^W*%gCjy*RHr^Rzh6_%Cz}aujBSsue^Z605n1i(jxE$yFLcEtZ<=xCVvCDdKj- z&JJ198U+~H@YJf@{0W;jOfI1 zY>INctkZ<#vIBDx2+Y4!)t5idg_RFfg?p>6o8MswVG4tSr2I7a1Le^e<5;LU9R;pU zZ>=@q)<>q*rg$ahOm4I6VlBp$T+D1pc^DBwN|spEJ}=^h$kIb^GPGS?8_{Ja__8v_ zt%fo>Fu6&UP0I}!EukegZce^^iWw@4s86*2^Ubbwu8T!~51}8T+H&-u@q232!0|Vg 
zLz0WBr>6);%1A=HEG_9Ho`+fVcR(mHwe&2pOZq)kP0o2E@3~@K$KZKuqP1L2S}n

+p?`Me)>`WtWPG- zyT_+lm;UBaN`uSyJ!q@dQ;XogN>iR9O8+I%Veb@It*yQ3ej`|yzDgsl$ijtdvigRO zzP&ohZGjyH4z+}^z@)Q_fXLbTEPR2U_yoX(uDnVr(lu2%&X=P>G0Z)Y!|)V45b_6n zd;(nzwH-S&kwg2-_%VY&Y(9UK2a!24PC`h)80UF(D^$%~b<1EaH=J{FMeUrO8Z)A1 zMq>>*laC+(xKaS1Xd-_gW+*_yW3iUg`LAR+Dvq8eCRZS~)1ow$HNyONY=5nx<4O5< zEso^BAET^IH;*&F{X8>xad2RFR$hR=ncJ# zT&teS^DqMA6Bn>TX{`DelR_HKrmtHN!j3mi^nZyA{D zooU6kSY-%y43{8aWFoL`IY$n zf>z3(uO*K*xPLs8$+G=!yz=qoZvf^J0gO=35=S1wb`t^Bv{pmr?Zh8N4uSl9h7mKK zY6SPc^E3+{zuSYeT1_Q3o)Eij@DhTlQc3yrNw8P)1r)cDN2;rxCvc`f6g?6Dafm3j>Stc zz6XKHcX4dV?8WN}`1b2(R$s9SlzK246c4xxXN4XS2WEtNr5QoQkLXw|XHUu1oJ2CmAkRfEi`cL9dVU2-7O+uV@tPq3u`*#D zV~6!t)4p5M0bw)3YyFwETpuOaBNsI&X!w3ndtBY@9@|B>@Wr2|XYu;}ll-Ok@dNeh z^Bn|cVyt?ik!RR_wPm90k*gesTd5cSxVqQc5a>h$%PwVr0HFmxTVL=653`rdWhizz z{LD5l$|}XfXLxmd?OMLng(Y#NkJ#L0fI=moPd*bwaQua6_!9q@BC*(MALfm-U zB{^K-4qgu!9&n>mng=WFVe!9C(#*(`mY;1-h?3$X*HJX&CULp>+1m!zBd!WLW~_*O z6#}**9Bt4r#+>m=;nQxK5y!VfMhTp@&7oK9x0CP$=cfZvFmo}%*q2^9!An;?uHG!K zYs?K!j4(=iAr6<3*jGe0X|{%#K7mxSGGO6MJb1;i!}Q#S{~R`FRrap4+-=vUf{W1i zpMKMwMh^iI!n0qyn%>_2R8qAfz&KT1F=p}TvC83bX;tcD`UY7BSeUgvDm=+$x@cjRKh_SYfg=i>cj4oilmvjfc>A8uW_@|Ij>1Qtx@ zM6VK~oQCbnpabODZum@qXM;zI-BxprOZgC;QZL#nv*iYoa<|W2`EhUxxhJ}ZcZaF zQrVppOGgtO7~Q*We(mmo2$m>SZ{b#&aO;yH8Dh$(bg!k&W*_B9V-8x*F7*cdIB0i( zMU_j!%1uf-Ig;G2Yy-1;fLpbf&#hS_SvNcC$x5mM7t9XEq^a_!mDI&ZsH_jfr+udt zp;1eQlQMuwWrC*8zm!VD+TY*+C;QC++&e#7n(X7H;bKG0B+J7euh%VuLLClq^J+>9 z=0rsaX7>_N0YI;DsJVa@%LM1A>O(r4dA2hZ?lM!+wg}V*BT1RB>lt-XxkW^iG6V=9 z&fTu`Kjp8wq7RaHyh|BpI}?3gwQBj&$XI;{!{4&$dORjs5fK4 zU3b_xZk_UicX+w41vagD>+t=So+>?hdAy9nHki7LX>PHqi=h9QcTru!`&iC7nI|d_ z9G6iJf?f46V2N;{p&{^W1VL4WWv3UPm%qFyV=%W?99WCr4}F#N-t+wO+w=P!Uw={~ zUmS;es{EaRLl}0NuE_5>sMDx9qpizEG-)fLOF}wA`g< zImW13XOU=$KUg%b6;eSUjPZs+2q4r%;1wbOTOj!tv1jhIMpnL=jcU$rdBxMF?WGb$1tQBv*xd3uUJ|pqJ;akF=GU$RFq)uL4oeXPqR@ni`KqU(x0N>z zD#bI$+4MiGs@AtJPRM<=vtIogOlbG!LnmwhyDJ1AldQ-uX8<6wnXdR--sZ8 zD+YqD0R8^bK>lVr;xEy%yge)o8!zN>U)F)*vJ*s##Tm(4LmoTW;6&rhdcLNb_T5se zr2b3k{}a)#=0q<*0;6q(nm!gOC6w6>jqWvI=2$WtBIKbTdL&>9w- 
zSW@Q$^*Pvb`L>$Ap1C03209xxUn`EQ<~pGQ9(=2|eH^oMa|ZwDn!t2k6q~&FG_-RS^o}&z;26F0Mw_;2o)?E+VC6BnLyoy9#X~OB4*fC_Y z+|iPYtRvyBQD6abD3)& zJI%?)<6#_ALfwOdREzfKyp~aJuM%K%g<0u|t9};gbiH3H#2>h2lqVcb#~*6~fH|=W zstKWkne8RAOS#ChJWnEqlL#KDug*-1xg5(3s761!K}S1Qq9g!iX?0wzo3%(x(pHM_ zX%RY?I!msGkT@H)?(L7aS&a>TgQVZA%%5rU3q&5RnT`dBLyY_LeaCo(b-|@xh0p;m z(=}hv-E+11`thXDkO{r*hwx8AV=7wYQv7Ne-FZwCA?Oa>fXc~!hN9W+rG=Q&Njl%v zFjNN&U@y(IZw(K$P@{rJ+k{RA>*;x~5lJM3fZ%b(f+EhzBxs~v~FV)*XSa(3XnD4abir>@(3<>47sYdoK z>1Zc3(m6J*(wxWM$}D8!h;`0^wO!Uhreu!vR*IU=3wyKOa1gHTI?w?B-gya-Le48s zN&T(F?=BxJYPOzgxJT5FWq@D(us@RLfph|wk^Y{hYxhuBV5*E@)r1Lch7<9Z;|!{S z!DS)8bnIU7~~ z150K*&Sk?K#nEpwj4ByO#kK-z=NmjJ{;L7mFzA$sp$$|o%~9|WXe-vIfIeD;?Z945 zj@2F5R9njc~7^n0CZ;oz~|9C+r7CLJ{MT+dhc`_ zD3&bRG_t2g7SNu@J9FpJ_R9y~s#p}2#PW-@Nvv-6g4UU*b@M@SS1tH@a9AH3&j8s@ zzty_n`b=ghf8k=6{^v%9qAhypi;d4vsL^V5hb&wBZrN@XkoW4+!~aw5z7R+IEW<-I zE>T@xAC=P-;xhajvywG8h)miK6=;mIarU!;Y*!mHlFDI30%Ik#QTE!YRrL15J=S1) zueWM26HRINaykhdIy)K+grv`^80}hYor@g*a;rUlTlH9_X;VOKIZaE`Rn)Tj{a&M) zXkuTCe_9DA)>fnj&*_-m%~d@^Ta-AysCQAf@!@W&af6MvL)m4_`fFDzYHV>_cNY^q ziBNeHUVS0LpI7MFFVQZncz79`h@aDSWbi20ZPeznhqpeS*dub0Df7r&m(GJb#{?eX zhz(-XBYc6F&26iz^RAf3B9r~vhZxza-blWH)sVjxJchoIo^$MQ3W&iAf+S+e{e-&L z8#`bIt04yH|Rv57?MBj zaa%EO_mpb1RB&TI?chyUnKQhPR}m+0Mx#l|0W19)Cys5I3Ry9(?>X;mpDS5xY0SWM{ed<87heS z7cC@CR2OjyK!U(Cb{OK)m)S)e(D%zC&+KShZrp4sTIGH~Aaz4V=U{p%!saaqZA>K# zG=4B>IJ>#1s#le8H{L7r(8@A<4A1KIRNs)R2m%v7A75T zLpBn>MzDPzS#Ol`i}=$xG@1+^+NmFP8_&p=JBb7@7wz z1~O3(8`rPVj?4_D>pPY(=uew%YVteGB4V#&KISaTSLl5j#iPoC@`v^zE$NVpDlH9r?p@ ztw_B@!f${na2N#fj2g=sU%D&*VkGY&lTk}tNO2x0JVI3~nOc}^<*K6iSa)OIA7#+xS z(7|&T9NQE-!B|kW5ccJi75Q=y?{;yD6zoasAT(-fxm7hsOTp&Q#1O1Fn75~tI*gz| z4fWrD&CI_m>XRfE3QvX>ZPof7E<|kDqGEe!8Bw`OxtCV6u@}Trl{Q+s^=g~)?rbDf z^Dv06^B4g!*9cOz|Elr}qyiC_-*l9g`}rw9x4nHjHQ@Tsc&rP*m6R9nDs5c!2VogC zu#)Z>gwmHT!UaG_ojdC|(ot_xmGj85WqTJT-7(oPldZ40tRW2jmg*nhIuA>SWOxO< zDAd^&mqhI;g-5E2FI;NIadX~VsE?3?@7J9@{92;y6BpAMWgVnq!K zI9N{D+Q+RBwuQYqTeJc=R0xi(Amge#K!@C}3?c%-8BU(7%MTYKRgXa%p;7->=K)NV 
zscatw4NNftOWqAvI`~h$jwKe74w_(XH6+5+>yj3m>J({nMN6({_tDX*V~F$-L_&H% zQ#00IsR1qD(QkfM5Smq)TKN3XoOriEc>Ao93g|KYuR8SAxPPHW{a-++|GN)_REYao zv{i1`H5<)z<3!{CoqFVW?`6T@Os)Z!Ww{ekyrau5>N^-r^W@edKKcq_9 zTFV);GD1KvvvVq0^5@Z!?9W{%>n}%zri?TddRm;jT6!2P^ka@XrWC1XINN>=&%tRD zQx^8;D~0CP;I~yIKSkaNukb6ALIH^iC^qLrDxZx=4f$v9qBWXV&CP4Ujlg*1wE zT(E2aq+1=0MiZI*L~av)nPLAf`zhDB>Lo5L#OWPz=NG-8^rD|NnfD@9DDSXOytasB z>+%yyKGrYrQbA!_e0NyLD)>xwpp?9d(_GMg8X;{pHd(=I_04i&Dz$!_=AxAp?C+Yb zJFW8-wb&&YptiO$8`sMrI4Ac*_NDS4%OfNoN>x64zLR-j{Xq5d>)if)faZmr zXeV(9QZB9^3*_gTNR|B0AJWq$j4wn)UZ2{7^~s00I{FFb8`0}Hz+gh@w(Xv^?U!n%kAI)dN08%b_P3sz zhtmblAG4E2?JB5(%y82Hgfnn-M z+QWZ@eL}=P!~S&VbiG#}*#0er7QKvPe&un5vwaHv^XPC@Qxh~rk(+{OEmQxbe*di^ z3jD`2%JKGpS+$D{zI;i<6s z(Hm+{V+5>?{#|7Ga}eLLUg2+Z{Tq+$7k=`W3b2FVIsVcXAO8PO!oQ$-XGt9LwOx$j z@bu+H;l_q@Zfk6uZPLqK^WpqBdUKJZnVb|=)jQ`K1cZxbO(ACtXTf>0p3eJ>ZGQj` zreyg{g!Igca=zvy~pZvJC`FMZaDvZ{J4t{s7oMG77o$DY@<4 z{HyKZ;)lD(SBR3Wo2j2<-z@c1YcA`Rd8YU|_Evd&Lh%pyg_%0x9wICJD^6JU1 zqHc#4U*NMg)B>ZrT7!wvO{o~JMo2oOi?dMG7+|Qsv$|ith^nh^ibSP7$A1pJl+V;v z`JlFDDK}M@n={*&=JtL_Yfwp_D2<*u=Su`-c9!tnG-|WZGQZS)=i3=U~=C)XP z=gh0aO=}$Txi~F`f|G4oX7gx$zvJ!qU$@>Ql`YTp4QD-HuHT@%*G8B6wEkUV+#}Ax zgzd(FjmiVOm&GnvwL~ubWw$-;r6V`|(!g;nM=F?cnbfk3es-t_MN1Z+-7w5Q$wk51 zTJ+x3X^=inTrRuUwv>(1R0g~ze}{zLviRpdXm?|6btG!5^3K{F&IS0~7tdzLZ)hjb z7dQFz0Nyx#hBcNf#{miO5}Gt7a=FCaM|3q_l<{nzqWN2NBE>U|EA{oE7EPcICdWWB zsc_`FJ|V_@I1rN=f>C>4qb+&v8~;rk6bBq;;v9?wT_e)q1=P2k34g zJTbQ`Ku}o$GJ;TUX_z~F2Zq@O^f>k<$hBAhP+x9YlM#^OiP7UX!nu)n$Bt-Z7DXCtx@i=aHcIJe@|H)SW-7FWc+kTwKug&W~LL)Cdqku9!!R7e| zri_BDM%Z>puc5qa3m6sC?rt+TnczfR-h^DXZ3<%*G<~i&chxbIO+)Xp>!G?-9BuSWcy@<5ejq9b;hC~T*z!%#FL=v&O zOYF60sLLQ7O^d~cZH^0VC1Tp{1FzTUlGeC~9Kp#V^07B{Nf3}+C6f(u&R~MN?u`H@ zot&>!bub5?RXV2LiFCf)`v1pLt`Dp#c{uAn-Nf8C(|i z(afaUDzrtm>$H<$;@8Hx;BMJLxVw!$Z)W|6=i*tCm}65HD(=Ogs9s|-x z3VO$`ZX0>+A7uAxHc$k!N{4IaX(td_MrTO7kfYvlI*0P*(yeH!mo z{K=y>T#d0H_O!Y2PRY&nLT{L=ypQ#XPAta?NHqGs#1 z*shQ1veiNL+F-+qM%<0F>D1(VHb`SRKu`%QV*@KZ~ 
zYM@cs4JSAPUjzpNVS2mt#!E_Q%;D-@Oaxo|vB9|3RBuPK`G=0$3Tp&Z>MU&Vz6B4< zsz0J!NTQZ|L`;!bKVQ%`j>%NApgK;s7?2u?2_m;$)+6AT08HRYQlCX86?mnTV$-3v zrD~>p&=NS>9;hUsCuD;Wx%bQnJ zrz{O?CK}ngYUouY^l62hJW3MK?!hk@lZ%dalYLd0H7}hfmbgj0vF>Uhl8r5~ze$8@@_D2qC#99ot2sPtuYV%GihZPN*Xwx2j$ffb+uJ!m=zb^DvevH_{?$m|`-0kfxNQSK9zvu^ zn)G0Gv14Pt!D?Awr-lxEV%Vs$Xllh7gz3+^maizO0oT99HKhOgn0OO=b$pk6JxRyR zrMuCUb=crgr{msiJ&(AHbG4~6Yf1ybX-~R74+I3cLD^5oWC{>3p(;|^wT<`#AYz5D z^P7DjI6C1RwYJ8n0_N;*aCdI3|371f8gD$>e4$3{E{Y z-Rv^DkbiJZ5cbTSInBo;Q8^{oONf5SJ?@3Kz&o6UUnfMJo(`nURVQ#TMtCk5%EBHPTTM3CGwHF6*YaF% zc28X>Al2H^tT!Oyr*2M>2Y{~?k`vK=WWly$NDy8}<`1kDw5+#z%VsQv_zOb6J!DoC zZoJW=c!NzkGN~>9UFM#jG&w!f`~X&!wG+M+t`ir5#4|xb0P-tv=(hpxC+&TVYE?5G z4|kS51FVH@#qPbqzs(C=reA0A#lp?NL}k^h-e6_xI7@71p~Y#%iB&4k zvZ8;cLrs?-hn12h1|kc`Zt`Ssaz>z^t^-U^z$fssuQvp27JZh^BFz^JtNLDs@n$r# z$@qz}-*e;NeV-#KSYRmB2!p~LlkzBifpiG9HAadweP!sX-<|lbHl?HG+WhQ|z%}h{ zS*K0rQ%C;8{i;o3UgzS*t%r6jQi9d93+U?F{37Kj)yMp%GC-=pxvG)PhgnY))rS`t zwD=P0m&cw?$42N0Z|JS-%aPsoHUSP7FWgu-mdc?TM)Tu7D*Y&>Vc8=(c`Aii*YZA5 zsg92_5W=UW9gg^4Cs7l`-Yp}gF2v1K?HszXDk!tnAQAh~Yz%vc3qMygg3?SD!iLl9 z>eJn=g|5t+n6hA`0OmCo2|)oT~w;7B5XTv?qg$4Q)*ic(KDz{9=iTR^3G6% z8&@2^lpU7-r!i8lvxtS%RCD+bK)M;c-o=5ZCfnT{+Q{w3la6|TXykG3*?lrAAm@E! 
z!c^NaMmil`>c7YOOSm~6Y?X~lvC_m_yYkpn_b--$aiyF_3&;U zZj@A+xS1+|`=$felP3$tVW?-|%Z%?1>Bq&D>cOCU8qN1KD^C?>*9yf~wLYK%fI4VF z<5*2&rfz_KFewH=g7n*G*Jp0O%eJ=gLuuZKm~XXsTAYs}am%=+-#@Z%Zbz`oQ5Akb zndTwz2Y?oUG%#FZZ}JfrWz0H~wqs@yU~Fz#)mf1XY_xq!>(ygAN?lr!Z>rO~DqTjh zbk|0imTHLxgV^)J^ ziQ6LX09=%(C)%CFEB(`K-Ns?!Z=LS)zJ-5OsUmNius24;>g1M463)>G9ufkuT5rZ% z+e53rPk*oTYoI7)A7jSaS$)L6%@)34O(Qc8W2D7#t8t-KdkLEimunEz;=3p#_yjWO z&}5;atMlNl@32MY%8^E`7|QR2QTB^ZpKK;=rb_TMLSBC%8=yzVv;#1PgTc!+0~}&H z096e*o2qA`l(|4<4Hu7x4H%`4T!@a^n@(x23Qo1hI>z7Vrv*=A3T^>0-pmuJYVh@# zNl9(Bj~5P|?;QI#D~J;D8;-K1^@IAb8i|y0=J8wzt>OqTS3!;`MJ-G{U5#x6PCYWR zw(sIbIx^wjVui25);#`oLF%XL<$7up?oi17!_spJC@1O9MteS7oC?Z)wPqCHV!%Hc z@1scZS|YqjQ9~ap_*Auau9e*iu{KGPp9HnJnIM|ReOP~GTR}9dFQWlqHYh;ZGuvC4 z*)q(Mcd+)zTA`_=U|?(aYzR*v#xYphU{vZ)Voxv{KolBCM^19UM%-B1yHdz7HZ{Kz zb6X=v+m=yiPEXRpHPOtCcgmUOem1z+zdX`pthLp|gofY%7Dz*#F?B{|W`?cwZ=OPV z-TP7cl39_0uEL?dTB^;Kw|D*uiDG7+2tK|vc&FL7{!$9^fdKM?MlMQ`@;2x7A|(KN zu7P%kw6Zn2R}}f+;?Z`E_xM}o-ztLsJ*xy?^k5Fk%~o!i>&*WmBkNqmVh8gAb483xatX|;22F0UEq zGbEI(aK?o`t8_wgQ4>+Y9B3w$q0mQ7=6k-UWZpIKQf_7Jg~zu-yKHeR^R1 zNx_AD_?eBn9Z#pcYuGm*yOz~od@6sE{vNl_6(8vg-D;hFr5)lu_VZ}1%ctW~?>F+F zBec{`2Qp5SXHagRP^TA<-fbBFMjHMB7#`h|x>hYyle(7Ia67c6X8GtUSUvrX&Q1Ph zU}u=}$gkvK(x0D;{f(S`eB%EjA#y*_K_)WVf!2ap;vi3po`=&+SDp(D8no3RLnD`z zk_t-;p4;*N{F8;>xG^Zc>Ui{Z`;?hvuGwozTSons3SKO?8CHgi;3eQsbD<(v$10P< zuL}Ou-GEB>(Je|>92m{;&+bX z3$238@*chWpP0&E`TJ~(Yz3DdyjeTkKfCDad+{e(`ur+!Wz9NsSm1D+RriZItIEu1 zB&$@rBjv;}5oEvwezx)yK*{U?4IAMm4F}R~e(00_;`NMCIWAFY+8vUSYF?au` zqGZDbfJAZ)By(>~+eeeJ0~JKp5PTOM*1zy)lzKquOLI%UR;Xg5Om18)-{A z_x4}!`X7}3{6B0Ne<`AWZ|Jfdm-(-rbN<-WJ7?SNkp9*Y^7GZFW1-*3vOfS@|1h1m z3!`QW(b%;7nC6WIow0np&P%UxnXC~1m}sd%K_C`P-AzaOk-qWY+j_}Yk<9u3ZQ*2c z{mS`NbJW@N%iFaY|Gwr@+&k~-Q7G2Ax+BTReaT&oQ=9e%#J|uOm;&ZxR-++6XTN^Y me|5mT`m|;$M0`{-prN4(ku<83s*WuQ&ib!!P!ayI_ + + +# Use case directory + +The use case directory contains the `yaml` files to test the multitenant controller functionalities: create `lrest` pod, and create PDB operations *create / open / 
close / unplug / plug / delete / clone / map / parameter session* + +## Makefile helper + +Customizing `yaml` files (tns alias / credential / namespaces name, and so on) is a long procedure that is prone to human error. A simple [`makefile`](../usecase/makefile) is available to quickly and safely configure `yaml` files with your system environment information. Just edit the [parameter file](../usecase/parameters.txt) before proceeding. + +```text +TNSALIAS...............:[Tnsalias do not use quotes and avoid space in the string --> (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELA....] +DBUSER.................:[CDB admin user] +DBPASS.................:[CDB admin user password] +WBUSER.................:[HTTPS user] +WBPASS.................:[HTTPS user password] +PDBUSR.................:[PDB admin user] +PDBPWD.................:[PDB admin user password] +PDBNAMESPACE...........:[pdb namespace] +LRSNAMESPACE...........:[cdb namespace] +COMPANY................:[your company name] +APIVERSION.............:v4 --> do not edit +``` + +⚠ **WARNING: The makefile is only intended to speed up the usecase directory configuration. Use of this file for production purposes is not supported. The editing and configuration of yaml files for production systems is left up to the end user** + +### Prerequisites: + +- Ensure that **kubectl** is properly configured. +- Ensure that all requirements listed in the [operator installation page](../../../../docs/installation/OPERATOR_INSTALLATION_README.md) are implemented. (role binding, webcert, etc.) +- Ensure that the administrative user (admin) on the container database is configured as documented. + +```bash +make operator +``` +This command creates the `operator-database-operator.yaml` file in the local directory, and sets up the `watchnamespace` list. Note that the `yaml` file is not applied. + +```bash +make secrets +``` +This command creates all of the Secrets with the encrypted credentials.
+ +```bash +make genyaml +``` +*make genyaml* generates the required `yaml` files to work with multitenant controllers. + + +![image](../images/UsecaseSchema.jpg) + +## Diag commands and troubleshooting + +### Connect to rest server pod + +```bash +/usr/bin/kubectl exec -n -it -- /bin/bash +``` + + +```bash +## example ## + +kubectl get pods -n cdbnamespace +NAME READY STATUS RESTARTS AGE +cdb-dev-lrest-rs-fnw99 1/1 Running 1 (17h ago) 18h + +kubectl exec cdb-dev-lrest-rs-fnw99 -n cdbnamespace -it -- /bin/bash +[oracle@cdb-dev-lrest-rs-fnw99 ~]$ +``` + +### Monitor control plane + +```bash +kubectl logs -f -l control-plane=controller-manager -n oracle-database-operator-system +``` +```bash +## output example: ## +2024-10-28T23:54:25Z INFO lrpdb-webhook ValidateUpdate-Validating LRPDB spec for : lrpdb2 +2024-10-28T23:54:25Z INFO lrpdb-webhook validateCommon {"name": "lrpdb2"} +2024-10-28T23:54:25Z INFO lrpdb-webhook Valdiating LRPDB Resource Action : MODIFY +2024-10-29T10:07:34Z INFO lrpdb-webhook ValidateUpdate-Validating LRPDB spec for : lrpdb2 +2024-10-29T10:07:34Z INFO lrpdb-webhook ValidateUpdate-Validating LRPDB spec for : lrpdb1 +2024-10-29T16:49:15Z INFO lrpdb-webhook ValidateUpdate-Validating LRPDB spec for : lrpdb1 +2024-10-29T16:49:15Z INFO lrpdb-webhook validateCommon {"name": "lrpdb1"} +2024-10-29T16:49:15Z INFO lrpdb-webhook Valdiating LRPDB Resource Action : CREATE +2024-10-29T10:07:20Z INFO controller-runtime.certwatcher Updated current TLS certificate +2024-10-29T10:07:20Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} +2024-10-29T10:07:20Z INFO controller-runtime.certwatcher Starting certificate watcher +I1029 10:07:20.189724 1 leaderelection.go:250] attempting to acquire leader lease oracle-database-operator-system/a9d608ea.oracle.com... 
+2024-10-29T16:49:15Z INFO lrpdb-webhook Setting default values in LRPDB spec for : lrpdb1 + +``` + +### Error decrypting credential + +The following is an example of a resource creation failure due to a decryption error: + +```text +2024-10-30T10:09:08Z INFO controllers.LRPDB getEncriptedSecret :pdbusr {"getEncriptedSecret": {"name":"lrpdb1","namespace":"pdbnamespace"}} +2024-10-30T10:09:08Z ERROR controllers.LRPDB Failed to parse private key - x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format) {"DecryptWithPrivKey": {"name":"lrpdb1","namespace":"pdbnamespace"}, "error": "x509: failed to parse private key (use ParsePKCS1PrivateKey instead for this key format)"} +``` + + +**Solution**: Ensure you use the **PKCS#8** format during private key generation. If you are not using `openssl3`, then run this command: + +```bash +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > mykey +``` + +### CRD details + +Use the **describe** option to obtain `crd` information + +```bash +kubectl describe lrpdb lrpdb1 -n pdbnamespace +[...]
+ Secret: + Key: e_wbuser.txt + Secret Name: wbuser +Status: + Action: CREATE + Bitstat: 25 + Bitstatstr: |MPAPPL|MPWARN|MPINIT| + Conn String: (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=pdbdev))) + Msg: Success + Open Mode: MOUNTED + Phase: Ready + Status: true + Total Size: 2G +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Created 108s LRPDB LRPDB 'pdbdev' created successfully + Normal Created 108s LRPDB PDB 'pdbdev' assertive pdb deletion turned on + Warning LRESTINFO 95s LRPDB pdb=pdbdev:test_invalid_parameter:16:spfile:2065 + Warning Done 15s (x12 over 2m25s) LRPDB cdb-dev + +``` diff --git a/docs/multitenant/lrest-based/usecase/altersystem_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/altersystem_pdb1_resource.yaml new file mode 100644 index 00000000..0467a948 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/altersystem_pdb1_resource.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Alter" + alterSystemParameter : "cpu_count" + alterSystemValue : "3" + parameterScope : "memory" + + + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + 
key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/cdbnamespace_binding.yaml b/docs/multitenant/lrest-based/usecase/cdbnamespace_binding.yaml new file mode 100644 index 00000000..5fd355f4 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/cdbnamespace_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: cdbnamespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/multitenant/lrest-based/usecase/clone_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/clone_pdb1_resource.yaml new file mode 100644 index 00000000..2c4afe13 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/clone_pdb1_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + pdbconfigmap: "config-map-pdb" + assertiveLrpdbDeletion: true + action: "Clone" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" 
diff --git a/docs/multitenant/lrest-based/usecase/clone_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/clone_pdb2_resource.yaml new file mode 100644 index 00000000..16255a87 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/clone_pdb2_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb4 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + pdbconfigmap: "config-map-pdb" + assertiveLrpdbDeletion: true + action: "Clone" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/close_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/close_pdb1_resource.yaml new file mode 100644 index 00000000..87f7383d --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/close_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: 
"db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/close_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/close_pdb2_resource.yaml new file mode 100644 index 00000000..0743bd8c --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/close_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/close_pdb3_resource.yaml b/docs/multitenant/lrest-based/usecase/close_pdb3_resource.yaml new file mode 100644 index 00000000..6c6ca519 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/close_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" +
pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/config-map-pdb.yaml b/docs/multitenant/lrest-based/usecase/config-map-pdb.yaml new file mode 100644 index 00000000..2769b498 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/config-map-pdb.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-map-pdb + namespace: pdbnamespace +data: + rdbmsparameters.txt: | + session_cached_cursors;100;spfile + open_cursors;100;spfile + db_file_multiblock_read_count;16;spfile + test_invalid_parameter;16;spfile diff --git a/docs/multitenant/lrest-based/usecase/config_map_pdb.yaml b/docs/multitenant/lrest-based/usecase/config_map_pdb.yaml new file mode 100644 index 00000000..2769b498 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/config_map_pdb.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-map-pdb + namespace: pdbnamespace +data: + rdbmsparameters.txt: | + session_cached_cursors;100;spfile + open_cursors;100;spfile + db_file_multiblock_read_count;16;spfile + test_invalid_parameter;16;spfile diff --git a/docs/multitenant/lrest-based/usecase/create_lrest_pod.yaml b/docs/multitenant/lrest-based/usecase/create_lrest_pod.yaml new file mode 100644 index 00000000..b80c1c56 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/create_lrest_pod.yaml @@ -0,0 +1,44 @@ +apiVersion: database.oracle.com/v4 +kind: LREST 
+metadata: + name: cdb-dev + namespace: cdbnamespace +spec: + cdbName: "DB12" + lrestImage: container-registry.oracle.com/database/operator:lrest-241210-amd64 + lrestImagePullPolicy: "Always" + dbTnsurl : "(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + replicas: 1 + deletePdbCascade: true + cdbAdminUser: + secret: + secretName: "dbuser" + key: "e_dbuser.txt" + cdbAdminPwd: + secret: + secretName: "dbpass" + key: "e_dbpass.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbPubKey: + secret: + secretName: "pubkey" + key: "publicKey" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/create_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/create_pdb1_resource.yaml new file mode 100644 index 00000000..fa58d36a --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/create_pdb1_resource.yaml @@ -0,0 +1,52 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + pdbconfigmap: "config-map-pdb" + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + 
secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/create_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/create_pdb2_resource.yaml new file mode 100644 index 00000000..02d5763b --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/create_pdb2_resource.yaml @@ -0,0 +1,52 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + pdbconfigmap: "config-map-pdb" + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/delete_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/delete_pdb1_resource.yaml new file mode 100644 index 00000000..1a3c328a --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/delete_pdb1_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: 
pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/delete_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/delete_pdb2_resource.yaml new file mode 100644 index 00000000..747641d4 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/delete_pdb2_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/makefile b/docs/multitenant/lrest-based/usecase/makefile new file mode 100644 index 00000000..4203baa4 --- /dev/null +++ 
b/docs/multitenant/lrest-based/usecase/makefile @@ -0,0 +1,911 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# __ __ _ __ _ _ +# | \/ | __ _| | _____ / _(_) | ___ +# | |\/| |/ _` | |/ / _ \ |_| | |/ _ \ +# | | | | (_| | < __/ _| | | __/ +# |_| |_|\__,_|_|\_\___|_| |_|_|\___| +# | | | | ___| |_ __ ___ _ __ +# | |_| |/ _ \ | '_ \ / _ \ '__| +# | _ | __/ | |_) | __/ | +# |_| |_|\___|_| .__/ \___|_| +# |_| +# +# WARNING: Using this makefile helps you to customize yaml +# files. Edit parameters.txt with your environment +# information and execute the following steps +# +# 1) make operator +# it configures the operator yaml files with the +# watch namelist required by the multitenant controllers +# +# 2) make secrets +# It configures the required secrets necessary to operate +# with pdbs multitenant controllers +# +# 3) make genyaml +# It automatically creates all the yaml files based on the +# information available in the parameters file +# +# LIST OF GENERATED YAML FILES +# +# ----------------------------- ---------------------------------- +# oracle-database-operator.yaml : oracle database operator +# cdbnamespace_binding.yaml : role binding for cdbnamespace +# pdbnamespace_binding.yaml : role binding for pdbnamespace +# create_lrest_secret.yaml : create secrets for rest server pod +# create_lrpdb_secret.yaml : create secrets for pluggable database +# create_lrest_pod.yaml : create rest server pod +# create_pdb1_resource.yaml : create first pluggable database +# create_pdb2_resource.yaml : create second pluggable database +# open_pdb1_resource.yaml : open first pluggable database +# open_pdb2_resource.yaml : open second pluggable database +# close_pdb1_resource.yaml : close first pluggable database +# close_pdb2_resource.yaml : close second pluggable database +# clone_lrpdb_resource.yaml : clone third pluggable database +# clone_pdb2_resource.yaml : clone
4th pluggable database +# delete_pdb1_resource.yaml : delete first pluggable database +# delete_pdb2_resource.yaml : delete second pluggable database +# delete_pdb3_resource.yaml : delete third pluggable database +# unplug_pdb1_resource.yaml : unplug first pluggable database +# plug_pdb1_resource.yaml : plug first pluggable database +# map_pdb1_resource.yaml : map the first pluggable database +# config_map.yaml : pdb parameters array +# altersystem_pdb1_resource.yaml : change the cpu_count parameter for the first pdb +# +DATE := `date "+%y%m%d%H%M%S"` +###################### +# PARAMETER SECTIONS # +###################### + +export PARAMETERS=parameters.txt +export TNSALIAS=$(shell cat $(PARAMETERS) |grep -v ^\#|grep TNSALIAS|cut -d : -f 2) +export DBUSER=$(shell cat $(PARAMETERS)|grep -v ^\#|grep DBUSER|cut -d : -f 2) +export DBPASS=$(shell cat $(PARAMETERS)|grep -v ^\#|grep DBPASS|cut -d : -f 2) +export WBUSER=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBUSER|cut -d : -f 2) +export WBPASS=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBPASS|cut -d : -f 2) +export PDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBUSR|cut -d : -f 2) +export PDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBPWD|cut -d : -f 2) +export PDBNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBNAMESPACE|cut -d : -f 2) +export LRSNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep LRSNAMESPACE|cut -d : -f 2) +export LRESTIMG=$(shell cat $(PARAMETERS)|grep -v ^\#|grep LRESTIMG|cut -d : -f 2,3) +export COMPANY=$(shell cat $(PARAMETERS)|grep -v ^\#|grep COMPANY|cut -d : -f 2) +export APIVERSION=$(shell cat $(PARAMETERS)|grep -v ^\#|grep APIVERSION|cut -d : -f 2) +export OPRNAMESPACE=oracle-database-operator-system +export ORACLE_OPERATOR_YAML=../../../../oracle-database-operator.yaml +export TEST_EXEC_TIMEOUT=3m + +REST_SERVER=lrest +SKEY=tls.key +SCRT=tls.crt +CART=ca.crt +PRVKEY=ca.key +PUBKEY=public.pem +COMPANY=oracle +DBUSERFILE=dbuser.txt +DBPASSFILE=dbpass.txt
+WBUSERFILE=wbuser.txt +WBPASSFILE=wbpass.txt +PDBUSRFILE=pdbusr.txt +PDBPWDFILE=pdbpwd.txt + +################# +### FILE LIST ### +################# + +export LREST_POD=create_lrest_pod.yaml + +export LRPDBCRE1=create_pdb1_resource.yaml +export LRPDBCRE2=create_pdb2_resource.yaml + +export LRPDBCLOSE1=close_pdb1_resource.yaml +export LRPDBCLOSE2=close_pdb2_resource.yaml +export LRPDBCLOSE3=close_pdb3_resource.yaml + +export LRPDBOPEN1=open_pdb1_resource.yaml +export LRPDBOPEN2=open_pdb2_resource.yaml +export LRPDBOPEN3=open_pdb3_resource.yaml + +export LRPDBCLONE1=clone_pdb1_resource.yaml +export LRPDBCLONE2=clone_pdb2_resource.yaml + +export LRPDBDELETE1=delete_pdb1_resource.yaml +export LRPDBDELETE2=delete_pdb2_resource.yaml +export LRPDBDELETE3=delete_pdb3_resource.yaml + +export LRPDBUNPLUG1=unplug_pdb1_resource.yaml +export LRPDBPLUG1=plug_pdb1_resource.yaml + +export LRPDBMAP1=map_pdb1_resource.yaml +export LRPDBMAP2=map_pdb2_resource.yaml +export LRPDBMAP3=map_pdb3_resource.yaml + +export LRPDBMAP1=map_pdb1_resource.yaml +export LRPDBMAP2=map_pdb2_resource.yaml +export LRPDBMAP3=map_pdb3_resource.yaml + +export ALTERSYSTEMYAML=altersystem_pdb1_resource.yaml +export CONFIG_MAP=config_map_pdb.yaml + + + + +##BINARIES +export KUBECTL=/usr/bin/kubectl +OPENSSL=/usr/bin/openssl +ECHO=/usr/bin/echo +RM=/usr/bin/rm +CP=/usr/bin/cp +TAR=/usr/bin/tar +MKDIR=/usr/bin/mkdir +SED=/usr/bin/sed + +check: + @printf "TNSALIAS...............:%.60s....\n" $(TNSALIAS) + @printf "DBUSER.................:%s\n" $(DBUSER) + @printf "DBPASS.................:%s\n" $(DBPASS) + @printf "WBUSER.................:%s\n" $(WBUSER) + @printf "WBPASS.................:%s\n" $(WBPASS) + @printf "PDBUSR.................:%s\n" $(PDBUSR) + @printf "PDBPWD.................:%s\n" $(PDBPWD) + @printf "PDBNAMESPACE...........:%s\n" $(PDBNAMESPACE) + @printf "LRSNAMESPACE...........:%s\n" $(LRSNAMESPACE) + @printf "COMPANY................:%s\n" $(COMPANY) + @printf "APIVERSION.............:%s\n" 
$(APIVERSION) + +define msg +@printf "\033[31;7m%s\033[0m\r" "......................................]" +@printf "\033[31;7m[\xF0\x9F\x91\x89 %s\033[0m\n" $(1) +endef + +tls: + $(call msg,"TLS GENERATION") + #$(OPENSSL) genrsa -out $(PRVKEY) 2048 + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > $(PRVKEY) + $(OPENSSL) req -new -x509 -days 365 -key $(PRVKEY) \ + -subj "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=$(COMPANY) Root CA" -out ca.crt + $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj \ + "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=cdb-dev-$(REST_SERVER).$(LRSNAMESPACE)" -out server.csr + $(ECHO) "subjectAltName=DNS:cdb-dev-$(REST_SERVER).$(LRSNAMESPACE)" > extfile.txt + $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey $(PRVKEY) -CAcreateserial -out $(SCRT) + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + +secrets: tls delsecrets + $(call msg,"CREATING NEW TLS/PRVKEY/PUBKEY SECRETS") + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(LRSNAMESPACE) + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(PDBNAMESPACE) + #$(KUBECTL) create secret tls prvkey --key="$(PRVKEY)" --cert=ca.crt -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey="$(PRVKEY)" -n $(PDBNAMESPACE) + $(call msg,"CREATING NEW CREDENTIAL SECRETS") + @$(ECHO) $(DBUSER) > $(DBUSERFILE) + @$(ECHO) $(DBPASS) > $(DBPASSFILE) + @$(ECHO) $(WBUSER) > $(WBUSERFILE) + @$(ECHO) $(WBPASS) > $(WBPASSFILE) + @$(ECHO) $(PDBUSR) > $(PDBUSRFILE) + @$(ECHO) $(PDBPWD) > 
$(PDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(DBUSERFILE) |base64 > e_$(DBUSERFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(DBPASSFILE) |base64 > e_$(DBPASSFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBUSERFILE) |base64 > e_$(WBUSERFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBPASSFILE) |base64 > e_$(WBPASSFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBUSRFILE) |base64 > e_$(PDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBPWDFILE) |base64 > e_$(PDBPWDFILE) + $(KUBECTL) create secret generic dbuser --from-file=e_$(DBUSERFILE) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic dbpass --from-file=e_$(DBPASSFILE) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(LRSNAMESPACE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic pdbusr --from-file=e_$(PDBUSRFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic pdbpwd --from-file=e_$(PDBPWDFILE) -n $(PDBNAMESPACE) + $(RM) $(SKEY) $(SCRT) $(CART) $(PRVKEY) $(PUBKEY) server.csr extfile.txt ca.srl \ + $(DBUSERFILE) $(DBPASSFILE) $(WBUSERFILE) $(WBPASSFILE) $(PDBUSRFILE) $(PDBPWDFILE)\ + e_$(DBUSERFILE) e_$(DBPASSFILE) e_$(WBUSERFILE) e_$(WBPASSFILE) e_$(PDBUSRFILE) e_$(PDBPWDFILE) + $(KUBECTL) get secrets -n $(LRSNAMESPACE) + $(KUBECTL) get secrets -n $(PDBNAMESPACE) + +delsecrets: + $(call msg,"CLEAN OLD SECRETS") + $(eval SECRETSP:=$(shell kubectl get secrets -n $(PDBNAMESPACE) -o custom-columns=":metadata.name" --no-headers) ) + $(eval SECRETSL:=$(shell kubectl get secrets -n $(LRSNAMESPACE) -o custom-columns=":metadata.name" --no-headers) ) + @[ "${SECRETSP}" ] && ( \ + printf "Deleteing 
secrets in namespace -n $(PDBNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret $(SECRETSP) -n $(PDBNAMESPACE))\ + || ( echo "No secrets in namespace $(PDBNAMESPACE)") + @[ "${SECRETSL}" ] && ( \ + printf "Deleting secrets in namespace -n $(LRSNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret $(SECRETSL) -n $(LRSNAMESPACE))\ + || ( echo "No secrets in namespace $(LRSNAMESPACE)") + +cleanCert: + $(RM) $(SKEY) $(SCRT) $(CART) $(PRVKEY) $(PUBKEY) server.csr extfile.txt ca.srl \ + $(DBUSERFILE) $(DBPASSFILE) $(WBUSERFILE) $(WBPASSFILE) $(PDBUSRFILE) $(PDBPWDFILE)\ + e_$(DBUSERFILE) e_$(DBPASSFILE) e_$(WBUSERFILE) e_$(WBPASSFILE) e_$(PDBUSRFILE) e_$(PDBPWDFILE) + +### YAML FILE SECTION ### +define _opr +cp ${ORACLE_OPERATOR_YAML} . +export OPBASENAME=`basename ${ORACLE_OPERATOR_YAML}` +#export PDBNAMESPACE=$(cat ${PARAMETERS}|grep -v ^\#|grep PDBNAMESPACE|cut -d : -f 2) + +cp ${OPBASENAME} ${OPBASENAME}.ORIGINAL +printf "\n\t\xF0\x9F\x91\x89 ${OPBASENAME}\n\n" +printf "\n\t\xF0\x9F\x91\x89 ${PDBNAMESPACE}\n\n" +sed -i 's/value: ""/value: ${OPRNAMESPACE},$PDBNAMESPACE,${LRSNAMESPACE}/g' ${OPBASENAME} +endef + +export opr = $(value _opr) + +operator: +# @ eval "$$opr" + $(CP) ${ORACLE_OPERATOR_YAML} . 
+ ${CP} `basename ${ORACLE_OPERATOR_YAML}` `basename ${ORACLE_OPERATOR_YAML}`.ORG + $(SED) -i 's/value: ""/value: $(OPRNAMESPACE),$(PDBNAMESPACE),$(LRSNAMESPACE)/g' `basename ${ORACLE_OPERATOR_YAML}` + + +define _script00 +cat < authsection.yaml + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + + +cat < ${PDBNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding1 + namespace: ${PDBNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +cat < ${LRSNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: ${LRSNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +endef +export script00 = $(value _script00) +secyaml: + @ eval "$$script00" + + +#echo lrest pod creation +define _script01 +cat < ${LREST_POD} +apiVersion: database.oracle.com/${APIVERSION} +kind: LREST +metadata: + name: cdb-dev + namespace: cdbnamespace +spec: + cdbName: "DB12" + lrestImage: ${LRESTIMG} + lrestImagePullPolicy: 
"Always" + dbTnsurl : ${TNSALIAS} + replicas: 1 + deletePdbCascade: true + cdbAdminUser: + secret: + secretName: "dbuser" + key: "e_dbuser.txt" + cdbAdminPwd: + secret: + secretName: "dbpass" + key: "e_dbpass.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbPubKey: + secret: + secretName: "pubkey" + key: "publicKey" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + +endef +export script01 = $(value _script01) + + +define _script02 + +cat <${LRPDBCRE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + pdbconfigmap: "config-map-pdb" + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat < ${LRPDBCRE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + pdbconfigmap: "config-map-pdb" + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat <${LRPDBOPEN1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${LRPDBOPEN2} +apiVersion: 
database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${LRPDBOPEN3} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${LRPDBCLOSE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${LRPDBCLOSE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${LRPDBCLOSE3} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat < ${LRPDBCLONE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + 
tempSize: "UNLIMITED" + pdbconfigmap: "config-map-pdb" + assertiveLrpdbDeletion: true + action: "Clone" +EOF + +cat < ${LRPDBCLONE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb4 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + pdbconfigmap: "config-map-pdb" + assertiveLrpdbDeletion: true + action: "Clone" +EOF + +cat < ${LRPDBDELETE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${LRPDBDELETE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${LRPDBUNPLUG1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" +EOF + +cat <${LRPDBPLUG1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "plug" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertiveLrpdbDeletion: true + pdbconfigmap: "config-map-pdb" + action: "Plug" 
+EOF + +cat <${LRPDBMAP1} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + +cat <${LRPDBMAP2} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + + +cat <${LRPDBMAP3} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + +cat <${CONFIG_MAP} +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-map-pdb + namespace: ${PDBNAMESPACE} +data: + rdbmsparameters.txt: | + session_cached_cursors;100;spfile + open_cursors;100;spfile + db_file_multiblock_read_count;16;spfile + test_invalid_parameter;16;spfile +EOF + + +cat < ${ALTERSYSTEMYAML} +apiVersion: database.oracle.com/${APIVERSION} +kind: LRPDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${LRSNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Alter" + alterSystemParameter : "cpu_count" + alterSystemValue : "3" + parameterScope : "memory" + + +EOF + +## Auth information +for _file in ${LRPDBCRE1} ${LRPDBCRE2} ${LRPDBOPEN1} ${LRPDBOPEN2} ${LRPDBOPEN3} ${LRPDBCLOSE1} ${LRPDBCLOSE2} ${LRPDBCLOSE3} ${LRPDBCLONE1} 
${LRPDBCLONE2} ${LRPDBDELETE1} ${LRPDBDELETE2} ${LRPDBUNPLUG1} ${LRPDBPLUG1} ${LRPDBMAP1} ${LRPDBMAP2} ${LRPDBMAP3} ${ALTERSYSTEMYAML} +do +ls -ltr ${_file} + cat authsection.yaml >> ${_file} +done +rm authsection.yaml +endef + +export script02 = $(value _script02) + +genyaml: secyaml + @ eval "$$script01" + @ eval "$$script02" + +cleanyaml: + - $(RM) $(LRPDBMAP3) $(LRPDBMAP2) $(LRPDBMAP1) $(LRPDBPLUG1) $(LRPDBUNPLUG1) $(LRPDBDELETE2) $(LRPDBDELETE1) $(LRPDBCLONE2) $(LRPDBCLONE1) $(LRPDBCLOSE3) $(LRPDBCLOSE2) $(LRPDBCLOSE1) $(LRPDBOPEN3) $(LRPDBOPEN2) $(LRPDBOPEN1) $(LRPDBCRE2) $(LRPDBCRE1) $(LREST_POD) ${ALTERSYSTEMYAML} + - $(RM) ${CONFIG_MAP} ${PDBNAMESPACE}_binding.yaml ${LRSNAMESPACE}_binding.yaml + + + + +################# +### PACKAGING ### +################# + +pkg: + - $(RM) -rf /tmp/pkgtestplan + $(MKDIR) /tmp/pkgtestplan + $(CP) -R * /tmp/pkgtestplan + $(CP) ../../../../oracle-database-operator.yaml /tmp/pkgtestplan/ + $(TAR) -C /tmp -cvf ~/pkgtestplan_$(DATE).tar pkgtestplan + +################ +### diag ### +################ + +login: + $(KUBECTL) exec `$(KUBECTL) get pods -n $(LRSNAMESPACE)|grep rest|cut -d ' ' -f 1` -n $(LRSNAMESPACE) -it -- /bin/bash + + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + + +dump: + @$(eval TMPSP := $(shell date "+%y%m%d%H%M%S" )) + @$(eval DIAGFILE := ./opdmp.$(TMPSP)) + 
@>$(DIAGFILE) + @echo "OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + +####################################################### +#### TEST SECTION #### +####################################################### + +run00: + @$(call msg,"lrest pod creation") + - $(KUBECTL) delete lrest cdb-dev -n $(LRSNAMESPACE) + $(KUBECTL) apply -f $(LREST_POD) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrest cdb-dev -n $(LRSNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrest pod completed") + $(KUBECTL) get lrest -n $(LRSNAMESPACE) + $(KUBECTL) get pod -n $(LRSNAMESPACE) + +run01.1: + @$(call msg,"lrpdb pdb1 creation") + $(KUBECTL) apply -f $(LRPDBCRE1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb1 creation completed") + $(KUBECTL) get lrpdb pdb1 -n $(PDBNAMESPACE) + +run01.2: + @$(call msg, "lrpdb pdb2 creation") + $(KUBECTL) apply -f $(LRPDBCRE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb2 creation completed") + $(KUBECTL) get lrpdb pdb2 -n $(PDBNAMESPACE) + +run02.1: + @$(call msg, "lrpdb pdb1 open") + $(KUBECTL) apply -f $(LRPDBOPEN1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb1 open completed") + $(KUBECTL) get lrpdb pdb1 -n 
$(PDBNAMESPACE) + +run02.2: + @$(call msg,"lrpdb pdb2 open") + $(KUBECTL) apply -f $(LRPDBOPEN2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" lrpdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrpdb pdb2 open completed") + $(KUBECTL) get lrpdb pdb2 -n $(PDBNAMESPACE) + + +run03.1: + @$(call msg,"clone pdb1-->pdb3") + $(KUBECTL) apply -f $(LRPDBCLONE1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb3 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb1-->pdb3 completed") + $(KUBECTL) get lrpdb pdb3 -n $(PDBNAMESPACE) + + +run03.2: + @$(call msg,"clone pdb2-->pdb4") + $(KUBECTL) apply -f $(LRPDBCLONE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb4 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb2-->pdb4 completed") + $(KUBECTL) get lrpdb pdb3 -n $(PDBNAMESPACE) + + +run04.1: + @$(call msg,"lrpdb pdb1 close") + $(KUBECTL) apply -f $(LRPDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb1 close completed") + $(KUBECTL) get lrpdb pdb1 -n $(PDBNAMESPACE) + +run04.2: + @$(call msg,"lrpdb pdb2 close") + $(KUBECTL) apply -f $(LRPDBCLOSE2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" lrpdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrpdb pdb2 close completed") + $(KUBECTL) get lrpdb pdb2 -n $(PDBNAMESPACE) + +run05.1: + @$(call msg,"lrpdb pdb1 unplug") + $(KUBECTL) apply -f $(LRPDBUNPLUG1) + $(KUBECTL) wait --for=delete lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrpdb pdb1 unplug completed") + +run06.1: + @$(call msg, "lrpdb pdb1 plug") + $(KUBECTL) apply -f $(LRPDBPLUG1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "lrpdb pdb1 plug completed") + $(KUBECTL) get 
lrpdb pdb1 -n $(PDBNAMESPACE) + +run07.1: + @$(call msg,"lrpdb pdb1 delete ") + - $(KUBECTL) apply -f $(LRPDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) apply -f $(LRPDBDELETE1) + $(KUBECTL) wait --for=delete lrpdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"lrpdb pdb1 delete") + $(KUBECTL) get lrpdb -n $(PDBNAMESPACE) + +run99.1: + $(KUBECTL) delete lrest cdb-dev -n $(LRSNAMESPACE) + $(KUBECTL) wait --for=delete lrest cdb-dev -n $(LRSNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) get lrest -n $(LRSNAMESPACE) + $(KUBECTL) get lrpdb -n $(PDBNAMESPACE) + +runall01: run00 run01.1 run01.2 run02.1 run02.2 run03.1 run03.2 run04.1 run04.2 run05.1 run06.1 run07.1 + + diff --git a/docs/multitenant/lrest-based/usecase/map_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/map_pdb1_resource.yaml new file mode 100644 index 00000000..2cd57b87 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/map_pdb1_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git 
a/docs/multitenant/lrest-based/usecase/map_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/map_pdb2_resource.yaml new file mode 100644 index 00000000..bab614cf --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/map_pdb2_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/map_pdb3_resource.yaml b/docs/multitenant/lrest-based/usecase/map_pdb3_resource.yaml new file mode 100644 index 00000000..7bbae48d --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/map_pdb3_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + assertiveLrpdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: 
+ secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/open_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/open_pdb1_resource.yaml new file mode 100644 index 00000000..a845a0bd --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/open_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/open_pdb2_resource.yaml b/docs/multitenant/lrest-based/usecase/open_pdb2_resource.yaml new file mode 100644 index 00000000..9356184f --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/open_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" 
+ adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/open_pdb3_resource.yaml b/docs/multitenant/lrest-based/usecase/open_pdb3_resource.yaml new file mode 100644 index 00000000..1b8024ba --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/open_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/parameters.txt b/docs/multitenant/lrest-based/usecase/parameters.txt new file mode 100644 index 00000000..1f21ed38 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/parameters.txt @@ -0,0 +1,52 @@ + +######################## +## REST SERVER IMAGE ### +######################## + 
+LRESTIMG:container-registry.oracle.com/database/operator:lrest-241210-amd64 + +############################## +## TNS URL FOR CDB CREATION ## +############################## +TNSALIAS:"(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + +########################################### +## CDB USER FOR PDB LIFECYCLE MANAGEMENT ## +########################################### + +DBUSER:restdba +DBPASS:CLWKO655321 + +####################### +## HTTPS CREDENTIAL ### +####################### + +WBUSER:welcome +WBPASS:welcome1 + +##################### +## PDB ADMIN USER ### +##################### + +PDBUSR:Citizenkane +PDBPWD:Rosebud + +################### +### NAMESPACES #### +################### + +PDBNAMESPACE:pdbnamespace +LRSNAMESPACE:cdbnamespace + + +#################### +### COMPANY NAME ### +#################### + +COMPANY:oracle + +#################### +### APIVERSION ### +#################### + +APIVERSION:v4 diff --git a/docs/multitenant/lrest-based/usecase/pdbnamespace_binding.yaml b/docs/multitenant/lrest-based/usecase/pdbnamespace_binding.yaml new file mode 100644 index 00000000..5af79ed6 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/pdbnamespace_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding1 + namespace: pdbnamespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/multitenant/lrest-based/usecase/plug_pdb1_resource.yaml 
b/docs/multitenant/lrest-based/usecase/plug_pdb1_resource.yaml new file mode 100644 index 00000000..d7d310db --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/plug_pdb1_resource.yaml @@ -0,0 +1,54 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "plug" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertiveLrpdbDeletion: true + pdbconfigmap: "config-map-pdb" + action: "Plug" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/lrest-based/usecase/unplug_pdb1_resource.yaml b/docs/multitenant/lrest-based/usecase/unplug_pdb1_resource.yaml new file mode 100644 index 00000000..a5da5a57 --- /dev/null +++ b/docs/multitenant/lrest-based/usecase/unplug_pdb1_resource.yaml @@ -0,0 +1,46 @@ +apiVersion: database.oracle.com/v4 +kind: LRPDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + adminpdbUser: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminpdbPass: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + lrpdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + 
lrpdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + lrpdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/NamespaceSeg.md b/docs/multitenant/ords-based/NamespaceSeg.md new file mode 100644 index 00000000..6738fe56 --- /dev/null +++ b/docs/multitenant/ords-based/NamespaceSeg.md @@ -0,0 +1,14 @@ + + +# Namespace segregation + +With namespace segregation, the PDB controller and the CDB controller run in different namespaces. The new functionality introduces a new parameter (the CDB namespace) in the PDB CRD definition. If you do not need namespace segregation, you must still specify the namespace name that you are using for your CRDs and pods. Refer to usecase01 and usecase02 to see single-namespace configuration. Refer to usecase03 to see examples of namespace segregation. + +# Secrets + +In order to use multiple namespaces, we need to create appropriate secrets in each namespace. TLS certificate secrets must be created in all namespaces (db-ca db-tls). 
+ +![general_schema](./images/K8S_NAMESPACE_SEG.png) + + + diff --git a/docs/multitenant/ords-based/README.md b/docs/multitenant/ords-based/README.md new file mode 100644 index 00000000..edfd0208 --- /dev/null +++ b/docs/multitenant/ords-based/README.md @@ -0,0 +1,411 @@ + + +# Oracle Multitenant Database Controllers + +The Oracle Database Operator for Kubernetes uses two controllers to manage the [Pluggable Database lifecycle][oradocpdb] + +- CDB controller +- PDB controller + +By using CDB/PDB controllers, you can perform the following actions **CREATE**, **MODIFY(OPEN/COSE)**, **DELETE**, **CLONE**, **PLUG** and **UNPLUG** against pluggable database + +Examples are located under the following directories: + +- the directories [`Usecase`](./usecase/) and [`usecase01`](./usecase01/) contain a [configuration file](./usecase/parameters.txt) where you can specify all the details of your environment. A [`makefile`](./usecase/makefile) takes this file as input to generate all of the `yaml` files. There is no need to edit `yaml` files one by one. +- [Singlenamespace provisioning](./provisioning/singlenamespace/) This file contains base example files that you can use to manage the PDB and CDB within a single namespace. +- [Multinamespace provisioning](./provisioning/multinamespace/) This file contains base example files that you can use to manage the PDB and CDB in different namespaces. +- [Usecase01](./usecase01/README.md) [Usecase02](./usecase02/README.md) This file contains other step-by-step examples; + +Automatic `yaml` generation is not available for the directory `usecase02` and provisioning directories. + +**NOTE** the CDB controller is not intended to manage the container database. The CDB controller is meant to provide a pod with a REST server connected to the container database that you can use to manage PDBs. 
+ + +## Macro steps for setup + +- Deploy the Oracle Database Operator (operator, or `OraOperator`) +- [Create Ords based image for CDB pod](./provisioning/ords_image.md) +- [Container RDBMB user creation](#prepare-the-container-database-for-pdb-lifecycle-management-pdb-lm) +- Create certificates for https connection +- Create secrets for credentials and certificates +- Create CDB pod using the Ords based image + +## Oracle DB Operator Multitenant Database Controller Deployment + +To deploy `OraOperator`, use this [Oracle Database Operator for Kubernetes](https://github.com/oracle/oracle-database-operator/blob/main/README.md) step-by-step procedure. + +After the **Oracle Database Operator** is deployed, you can see the Oracle Database (DB) Operator Pods running in the Kubernetes Cluster. The multitenant controllers are deployed as part of the `OraOperator` deployment. You can see the CRDs (Custom Resource Definition) for the CDB and PDBs in the list of CRDs. The following output is an example of such a deployment: + +```bash +[root@test-server oracle-database-operator]# kubectl get ns +NAME STATUS AGE +cert-manager Active 32h +default Active 245d +kube-node-lease Active 245d +kube-public Active 245d +kube-system Active 245d +oracle-database-operator-system Active 24h <---- namespace to deploy the Oracle Database Operator + +[root@test-server oracle-database-operator]# kubectl get all -n oracle-database-operator-system +NAME READY STATUS RESTARTS AGE +pod/oracle-database-operator-controller-manager-665874bd57-dlhls 1/1 Running 0 28s +pod/oracle-database-operator-controller-manager-665874bd57-g2cgw 1/1 Running 0 28s +pod/oracle-database-operator-controller-manager-665874bd57-q42f8 1/1 Running 0 28s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/oracle-database-operator-controller-manager-metrics-service ClusterIP 10.96.130.124 8443/TCP 29s +service/oracle-database-operator-webhook-service ClusterIP 10.96.4.104 443/TCP 29s + +NAME READY UP-TO-DATE AVAILABLE 
AGE +deployment.apps/oracle-database-operator-controller-manager 3/3 3 3 29s + +NAME DESIRED CURRENT READY AGE +replicaset.apps/oracle-database-operator-controller-manager-665874bd57 3 3 3 29s +[root@docker-test-server oracle-database-operator]# + +[root@test-server oracle-database-operator]# kubectl get crd +NAME CREATED AT +autonomouscontainerdatabases.database.oracle.com 2022-06-22T01:21:36Z +autonomousdatabasebackups.database.oracle.com 2022-06-22T01:21:36Z +autonomousdatabaserestores.database.oracle.com 2022-06-22T01:21:37Z +autonomousdatabases.database.oracle.com 2022-06-22T01:21:37Z +cdbs.database.oracle.com 2022-06-22T01:21:37Z <---- +certificaterequests.cert-manager.io 2022-06-21T17:03:46Z +certificates.cert-manager.io 2022-06-21T17:03:47Z +challenges.acme.cert-manager.io 2022-06-21T17:03:47Z +clusterissuers.cert-manager.io 2022-06-21T17:03:48Z +dbcssystems.database.oracle.com 2022-06-22T01:21:38Z +issuers.cert-manager.io 2022-06-21T17:03:49Z +oraclerestdataservices.database.oracle.com 2022-06-22T01:21:38Z +orders.acme.cert-manager.io 2022-06-21T17:03:49Z +pdbs.database.oracle.com 2022-06-22T01:21:39Z <--- +shardingdatabases.database.oracle.com 2022-06-22T01:21:39Z +singleinstancedatabases.database.oracle.com 2022-06-22T01:21:40Z +``` + + +## Prerequisites to manage PDB Life Cycle using Oracle DB Operator Multitenant Database Controller + +* [Prepare the container database (CDB) for PDB Lifecycle Management or PDB-LM](#prepare-cdb-for-pdb-lifecycle-management-pdb-lm) +* [Oracle REST Data Service or ORDS Image](#oracle-rest-data-service-ords-image) +* [Kubernetes Secrets](#kubernetes-secrets) +* [Kubernetes CRD for CDB](#cdb-crd) +* [Kubernetes CRD for PDB](#pdb-crd) + +## Prepare the container database for PDB Lifecycle Management (PDB-LM) + +Pluggable Database (PDB) management operations are performed in the Container Database (CDB). These operations include **create**, **clone**, **plug**, **unplug**, **delete**, **modify** and **map pdb**. 
+ +To perform PDB lifecycle management operations, you must first use the following steps to define the default CDB administrator credentials on target CDBs: + +Create the CDB administrator user and grant the required privileges. In this example, the user is `C##DBAPI_CDB_ADMIN`. However, any suitable common username can be used. + +```SQL +SQL> conn /as sysdba + +-- Create following users at the database level: + +ALTER SESSION SET "_oracle_script"=true; +DROP USER C##DBAPI_CDB_ADMIN cascade; +CREATE USER C##DBAPI_CDB_ADMIN IDENTIFIED BY CONTAINER=ALL ACCOUNT UNLOCK; +GRANT SYSOPER TO C##DBAPI_CDB_ADMIN CONTAINER = ALL; +GRANT SYSDBA TO C##DBAPI_CDB_ADMIN CONTAINER = ALL; +GRANT CREATE SESSION TO C##DBAPI_CDB_ADMIN CONTAINER = ALL; + + +-- Verify the account status of the following usernames. They should not be in locked status: + +col username for a30 +col account_status for a30 +select username, account_status from dba_users where username in ('ORDS_PUBLIC_USER','C##DBAPI_CDB_ADMIN','APEX_PUBLIC_USER','APEX_REST_PUBLIC_USER'); +``` + +## OCI OKE (Kubernetes Cluster) + +You can use an [OKE in Oracle Cloud Infrastructure][okelink] to configure the controllers for PDB lifecycle management. **Note that there is no restriction about container database location; it can be anywhere (on Cloud or on-premises).** +To quickly create an OKE cluster in your OCI cloud environment you can use the following [link](./provisioning/quickOKEcreation.md). +In this setup example [provisioning example setup](./provisioning/example_setup_using_oci_oke_cluster.md), the Container Database is running on an OCI Exadata Database Cluster. + + +## Oracle REST Data Service (ORDS) Image + +The PDB Database controllers require a pod running a dedicated REST server image based on [ORDS][ordsdoc]. Read the following [document on ORDS images](./provisioning/ords_image.md) to build the ORDS images. 
+ + +## Kubernetes Secrets + + Multitenant Controllers use Kubernetes Secrets to store the required credential and HTTPS certificates. + + **Note** In multi-namespace environments you must create specific Secrets for each namespaces. + +### Secrets for CERTIFICATES + +Create the certificates and key on your local host, and then use them to create the Kubernetes Secret. + +```bash +openssl genrsa -out ca.key 2048 +openssl req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords /CN=localhost Root CA " -out ca.crt +openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords /CN=localhost" -out server.csr +echo "subjectAltName=DNS:cdb-dev-ords,DNS:www.example.com" > extfile.txt +openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt +``` + +```bash +kubectl create secret tls db-tls --key="tls.key" --cert="tls.crt" -n oracle-database-operator-system +kubectl create secret generic db-ca --from-file=ca.crt -n oracle-database-operator-system +``` + +image_not_found + +**Note:** Remove temporary files after successfful Secret creation. + +### Secrets for CDB CRD + + **Note:** base64 encoded secrets are no longer supported; use OpenSSL secrets as documented in the following section. After successful creation of the CDB Resource, the CDB and PDB Secrets can be deleted from the Kubernetes system. Don't leave plaintext files containing sensitive data on disk. After loading the Secret, remove the plaintext file or move it to secure storage. 
+ + ```bash + +export PRVKEY=ca.key +export PUBKEY=public.pem +WBUSERFILE=wbuser.txt +WBPASSFILE=wbpass.txt +CDBUSRFILE=cdbusr.txt +CDBPWDFILE=cdbpwd.txt +SYSPWDFILE=syspwd.txt +ORDPWDFILE=ordpwd.txt +PDBUSRFILE=pdbusr.txt +PDBPWDFILE=pdbpwd.txt + +# Webuser credential +echo [WBUSER] > ${WBUSERFILE} +echo [WBPASS] > ${WBPASSFILE} + +# CDB admin user credentioan +echo [CDBPWD] > ${CDBPWDFILE} +echo [CDBUSR] > ${CDBUSRFILE} + +# SYS Password +echo [SYSPWD] > ${SYSPWDFILE} + +# Ords Password +echo [ORDPWD] > ${ORDPWDFILE} + +## PDB admin credential +echo [PDBUSR] > ${PDBUSRFILE} +echo [PDBPWD] > ${PDBPWDFILE} + +#Secrets creation for pub and priv keys +openssl rsa -in ${PRVKEY} -outform PEM -pubout -out ${PUBKEY} +kubectl create secret generic pubkey --from-file=publicKey=${PUBKEY} -n ${CDBNAMESPACE} +kubectl create secret generic prvkey --from-file=privateKey=${PRVKEY} -n ${CDBNAMESPACE} +kubectl create secret generic prvkey --from-file=privateKey="${PRVKEY}" -n ${PDBNAMESPACE} + +#Password encryption +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${WBUSERFILE} |base64 > e_${WBUSERFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${WBPASSFILE} |base64 > e_${WBPASSFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${CDBPWDFILE} |base64 > e_${CDBPWDFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${CDBUSRFILE} |base64 > e_${CDBUSRFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${SYSPWDFILE} |base64 > e_${SYSPWDFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${ORDPWDFILE} |base64 > e_${ORDPWDFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${PDBUSRFILE} |base64 > e_${PDBUSRFILE} +openssl rsautl -encrypt -pubin -inkey ${PUBKEY} -in ${PDBPWDFILE} |base64 > e_${PDBPWDFILE} + +#Ecrypted secrets creation +kubectl create secret generic wbuser --from-file=e_${WBUSERFILE} -n ${CDBNAMESPACE} +kubectl create secret generic wbpass --from-file=e_${WBPASSFILE} -n ${CDBNAMESPACE} +kubectl create secret generic wbuser 
--from-file=e_${WBUSERFILE} -n ${PDBNAMESPACE} +kubectl create secret generic wbpass --from-file=e_${WBPASSFILE} -n ${PDBNAMESPACE} +kubectl create secret generic cdbpwd --from-file=e_${CDBPWDFILE} -n ${CDBNAMESPACE} +kubectl create secret generic cdbusr --from-file=e_${CDBUSRFILE} -n ${CDBNAMESPACE} +kubectl create secret generic syspwd --from-file=e_${SYSPWDFILE} -n ${CDBNAMESPACE} +kubectl create secret generic ordpwd --from-file=e_${ORDPWDFILE} -n ${CDBNAMESPACE} +kubectl create secret generic pdbusr --from-file=e_${PDBUSRFILE} -n ${PDBNAMESPACE} +kubectl create secret generic pdbpwd --from-file=e_${PDBPWDFILE} -n ${PDBNAMESPACE} + +#Get rid of the swap files +rm ${WBUSERFILE} ${WBPASSFILE} ${CDBPWDFILE} ${CDBUSRFILE} \ + ${SYSPWDFILE} ${ORDPWDFILE} ${PDBUSRFILE} ${PDBPWDFILE} \ + e_${WBUSERFILE} e_${WBPASSFILE} e_${CDBPWDFILE} e_${CDBUSRFILE} \ + e_${SYSPWDFILE} e_${ORDPWDFILE} e_${PDBUSRFILE} e_${PDBPWDFILE} +``` + +Check Secrets details + +```bash +kubectl describe secrets syspwd -n cdbnamespace +Name: syspwd +Namespace: cdbnamespace +Labels: +Annotations: + +Type: Opaque + +Data +==== +e_syspwd.txt: 349 bytes +``` +Example of `yaml` file Secret section: + +```yaml +[...] + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" +[...] +``` + +## CDB CRD + +The Oracle Database Operator Multitenant Controller creates the CDB as a custom resource object kind that models a target CDB as a native Kubernetes object. This object kind is used only to create Pods to connect to the target CDB to perform PDB-LM operations. 
Each CDB resource follows the CDB CRD as defined here: [`config/crd/bases/database.oracle.com_cdbs.yaml`](../../../config/crd/bases/database.oracle.com_cdbs.yaml) + +To create a CDB CRD, use this example`.yaml` file: [`cdb_create.yaml`](../multitenant/provisioning/singlenamespace/cdb_create.yaml) + +**Note:** The password and username fields in this *cdb.yaml* Yaml are the Kubernetes Secrets created earlier in this procedure. For more information, see the section [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/). To understand more about creating secrets for pulling images from a Docker private registry, see [Kubernetes Private Registry Documenation]( https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). + +Create a CDB CRD Resource example + +```bash +kubectl apply -f cdb_create.yaml +``` + +see [usecase01][uc01] and usecase02[uc02] for more information about file configuration + +## PDB CRD + +The Oracle Database Operator Multitenant Controller creates the PDB object kind as a custom resource that models a PDB as a native Kubernetes object. There is a one-to-one mapping between the actual PDB and the Kubernetes PDB Custom Resource. You cannot have more than one Kubernetes resource for a target PDB. This PDB resource can be used to perform PDB-LM operations by specifying the action attribute in the PDB Specs. 
Each PDB resource follows the PDB CRD as defined here: [config/crd/bases/database.oracle.com_pdbs.yaml](../../../config/crd/bases/database.oracle.com_pdbs.yaml) + +Yaml file [pdb_create.yaml](../multitenant/provisioning/singlenamespace/pdb_create.yaml) to create a pdb + +```bash +kubectl apply -f pdb_create.yaml +``` + +## CRD TABLE PARAMETERS + +| yaml file parameters | value | description /ords parameter | CRD | +|------------------ |--------------------------- |-------------------------------------------------------------------------------|-----------| +| dbserver | or | [--db-hostname][1] | CDB | +| dbTnsurl | | [--db-custom-url/db.customURL][dbtnsurl] | CDB | +| port | | [--db-port][2] | CDB | +| cdbName | | Container Name | CDB | +| name | | ORDS podname prefix in `cdb.yaml` | CDB | +| name | | Pdb resource in `pdb.yaml` | PDB | +| ordsImage | ords-dboper:latest | ORDS pod public container registry | CDB | +| pdbName | | Pluggable database (PDB) name | Container database (CDB) | +| servicename | | [--db-servicename][3] | CDB | +| sysadmin_user | | [--admin-user][adminuser] | CDB | +| sysadmin_pwd | | [--password-stdin][pwdstdin] | CDB | +| cdbadmin_user | | [db.cdb.adminUser][1] | CDB | +| cdbadmin_pwd | | [db.cdb.adminUser.password][cdbadminpwd] | CDB | +| webserver_user | | [https user][http] NOT A DB USER | CDB PDB | +| webserver_pwd | | [http user password][http] | CDB PDB | +| ords_pwd | | [ORDS_PUBLIC_USER password][public_user] | CDB | +| pdbTlsKey | | [standalone.https.cert.key][key] | PDB | +| pdbTlsCrt | | [standalone.https.cert][cr] | PDB | +| pdbTlsCat | | certificate authority | PDB | +| cdbTlsKey | | [standalone.https.cert.key][key] | CDB | +| cdbTlsCrt | | [standalone.https.cert][cr] | CDB | +| cdbTlsCat | | Certificate authority | CDB | +| cdbOrdsPrvKey | | Private key | CDB | +| pdbOrdsPrvKey | | Private key | PDB | +| xmlFileName | | Path for the unplug and plug operation | PDB | +| srcPdbName | | Name of the database that you want to be 
cloned | PDB | +| action | | Create open close delete clone plug unplug and map | PDB | +| deletePdbCascade | boolean | Delete PDBs cascade during CDB deletion | CDB | +| assertivePdbDeletion | boolean | Deleting the PDB crd means deleting the PDB as well | PDB | +| fileNameConversions | | Used for database cloning | PDB | +| totalSize | | `dbsize` | PDB | +| pdbState | | Change PDB state | PDB | +| modifyOption | | To be used along with `pdbState` | PDB | +| dropAction | | Delete datafiles during PDB deletion | PDB | +| sourceFileNameConversions | | [sourceFileNameConversions(optional): string][4] | PDB | +| tdeKeystorePath | | [tdeKeystorePath][tdeKeystorePath] | N/A | +| tdeExport | | [tdeExport] | N/A ] +| tdeSecret | | [tdeSecret][tdeSecret] | N/A | +| tdePassword | | [tdeSecret][tdeSecret] | N/A | + + + + +## Usecases files list + +### Single Namespace + +1. [Create CDB](./provisioning/singlenamespace/cdb_create.yaml) +2. [Create PDB](./provisioning/singlenamespace/pdb_create.yaml) +3. [Clone PDB](./provisioning/singlenamespace/pdb_clone.yaml) +4. [Open PDB](./provisioning/singlenamespace/pdb_open.yaml) +4. [Close PDB](./provisioning/singlenamespace/pdb_close.yaml) +5. [Delete PDB](./provisioning/singlenamespace/pdb_delete.yaml) +6. [Unplug PDB](./provisioning/singlenamespace/pdb_unplug.yaml) +7. [Plug PDB](./provisioning/singlenamespace/pdb_plug.yaml) + +### Multiple namespace (cdbnamespace,dbnamespace) + +1. [Create CDB](./provisioning/multinamespace/cdb_create.yaml) +2. [Create PDB](./provisioning/multinamespace/pdb_create.yaml) +3. [Clone PDB](./provisioning/multinamespace/pdb_clone.yaml) +4. [Open PDB](./provisioning/multinamespace/pdb_open.yaml) +4. [Close PDB](./provisioning/multinamespace/pdb_close.yaml) +5. [Delete PDB](./provisioning/multinamespace/pdb_delete.yaml) +6. 
[Unplug PDB](./provisioning/multinamespace/pdb_unplug.yaml) + +## Known issues + + - ORDS installation failure if pluaggable databases in the container db are not openedS + + - Version 1.1.0: encoded password for https authentication may include carriage return as consequence the https request fails with http 404 error. W/A generate encoded password using **printf** instead of **echo**. + + - pdb controller authentication suddenly fails without any system change. Check the certificate expiration date **openssl .... -days 365** + + - Nothing happens after applying cdb yaml files: Make sure to have properly configured the WHATCH_NAMESPACE list in the operator yaml file + + [okelink]:https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm + + [ordsdoc]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/23.1/index.html + + [uc01]:../multitenant/usecase01/README.md + + [uc02]:../multitenant/usecase02/README.md + + [oradocpdb]:https://docs.oracle.com/en/database/oracle/oracle-database/21/multi/introduction-to-the-multitenant-architecture.html#GUID-AB84D6C9-4BBE-4D36-992F-2BB85739329F + + [1]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-E9625FAB-9BC8-468B-9FF9-443C88D76FA1:~:text=Table%202%2D2%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation + + [2]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-E9625FAB-9BC8-468B-9FF9-443C88D76FA1:~:text=Table%202%2D2%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation + + [3]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-DAA027FA-A4A6-43E1-B8DD-C92B330C2341:~:text=%2D%2Ddb%2Dservicename%20%3Cstring%3E + + 
[4]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.3/orrst/op-database-pdbs-post.html + +[adminuser]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22:~:text=Table%202%2D6%20Command%20Options%20for%20Uninstall%20CLI + +[public_user]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/using-multitenant-architecture-oracle-rest-data-services.html#GUID-E64A141A-A71F-4979-8D33-C5F8496D3C19:~:text=Preinstallation%20Tasks%20for%20Oracle%20REST%20Data%20Services%20CDB%20Installation + +[key]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0:~:text=standalone.https.cert.key + +[cr]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0 + +[cdbadminpwd]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0:~:text=Table%20C%2D1%20Oracle%20REST%20Data%20Services%20Configuration%20Settings + + +[pwdstdin]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-88479C84-CAC1-4133-A33E-7995A645EC05:~:text=default%20database%20pool.-,2.1.4.1%20Understanding%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation,-Table%202%2D2 + +[http]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-BEECC057-A8F5-4EAB-B88E-9828C2809CD8:~:text=Example%3A%20delete%20%5B%2D%2Dglobal%5D-,user%20add,-Add%20a%20user + 
+[dbtnsurl]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22 + +[tdeKeystorePath]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/21.4/orrst/op-database-pdbs-pdb_name-post.html + +[tdeSecret]:https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/ADMINISTER-KEY-MANAGEMENT.html#GUID-E5B2746F-19DC-4E94-83EC-A6A5C84A3EA9 +~ + + + + + diff --git a/docs/multitenant/images/K8S_NAMESPACE_SEG.png b/docs/multitenant/ords-based/images/K8S_NAMESPACE_SEG.png similarity index 100% rename from docs/multitenant/images/K8S_NAMESPACE_SEG.png rename to docs/multitenant/ords-based/images/K8S_NAMESPACE_SEG.png diff --git a/docs/multitenant/images/K8S_SECURE1.png b/docs/multitenant/ords-based/images/K8S_SECURE1.png similarity index 100% rename from docs/multitenant/images/K8S_SECURE1.png rename to docs/multitenant/ords-based/images/K8S_SECURE1.png diff --git a/docs/multitenant/images/K8S_SECURE2.png b/docs/multitenant/ords-based/images/K8S_SECURE2.png similarity index 100% rename from docs/multitenant/images/K8S_SECURE2.png rename to docs/multitenant/ords-based/images/K8S_SECURE2.png diff --git a/docs/multitenant/images/K8S_SECURE3.png b/docs/multitenant/ords-based/images/K8S_SECURE3.png similarity index 100% rename from docs/multitenant/images/K8S_SECURE3.png rename to docs/multitenant/ords-based/images/K8S_SECURE3.png diff --git a/docs/multitenant/images/K8S_SECURE4.png b/docs/multitenant/ords-based/images/K8S_SECURE4.png similarity index 100% rename from docs/multitenant/images/K8S_SECURE4.png rename to docs/multitenant/ords-based/images/K8S_SECURE4.png diff --git a/docs/multitenant/ords-based/images/makerunall.png b/docs/multitenant/ords-based/images/makerunall.png new file mode 100644 index 0000000000000000000000000000000000000000..ab856f90ca3a29bbaece7f4d80b0056801243613 GIT binary patch literal 211874 
zcmdSB2T+r17cLq_K|w^6CJG3sAfSk10fZ=31Zg52A|TR1sY*#yq)11kC?y~u(xijb zNLNvMM|zds2_?x{5%>P=5$3-}|ohtfzc0Rg~mtsadHJ2n4PC zO=&d*V$WFwg5v8wN_gcmv4tM~wZ~pkUVR_@&u!m*ANcvGgN&BL9oq*E&W3g-2vZwd zYm>9~#&#wqHuh$=4)YW^2?XKE~ueVPCp~pW2LVZXQ=XbW~iAM?&MEi|sWpe5AC9d;|Wf#5+x!;1?ehh5ZwD zSMbsm9pU1lq-VJyqiTllLmsxUDlSQVYjgd_d@hrvW9P@Rbz5X=YRQLGjYIS?8oZxb zj2jakzAHcYOsJN{*k7pQwnjK_b?q({-m|KQ;pg8k)^;hcDCxW~&)>%*Ifx>1z3FbC z#kcF;@<)#D#yS>=AC42MiV&1*@}#NUdV0}xu*k(Rv-ZS$OSeWpj7|v?F$hs&#W@s`Ec#070PQHH*S2=)G`we?K#H6-pk=}TBM?*pQ)|Gx1e^M zo#xf*!Me}a_b@YDpK*#GA2w2PbrqDYiM%$T6h-O%XylOHFEx~RCHdRevg95LC9#P( z1YR;wWT7^}5ht4ph77CNes1HORktu=aSoexE^WJ1#F~gruZD8UJAz{Zyk=&~4|@D{ zrGigqOm$Y4_ZVmy(#%&6sjN-YA>sm^c(;0oHJA0zqj48cA6GZ7+$E;?)6(OmYsUFU zSVpggR~-Jv56|yzh^?tVdp1(-jt4WTlXSS{H3ijhC(7+v1c$ifLqpCP)Z-h1IJ?2? zdkCwUk2jjR@mLAku&Ah+VhYj{Gk&JWGx!~sqNPP;roG)gF|*)co4!mAgH60K`8C_Q zK96}UanCf7aND;rdX3`g0FkiJpG_fX)8}g*5D@9Ke$K0G<22&Bp>+WLe7Rprp^afr zUkOXpo0L{`r-=QbXl&4c@D=Wr2DZ|b#kwqwv|R{7Y3U)Q7lY4tBMzO*COIlKAeZFi zXzFSE*tyz~aNnw2x19EDZ#3e6l)3KvUVqWtZD*N{{RvB7#LNx%`u7SU1gpysmSHhj zT{`A8N$yDA+1YsYGls^-w3e28)6*HV3MkX9N0D)@PE9B9SW!>7gL(%JRF7%r-ZNPb zDwZuLNxc5}@eAES*>=}?LoK&bgQKvU>H=naLpWIW@rP0Oi#dNw+cuRlGFCmz?49dD z8DCa(RM4jPbYfW+T3qk_?W&|-t*Q+4yOQ@GS*WZHI9+3^v?WFxi{xMn;uwjJU_KXk z%-efx4Dl3m>vm1t0Fg;^yg>%7awyMvzH0}mlAkw~>trw(22am;T&7@YOA7)) z$ItgrC;mFT5iyu2`K*QNn{TtJP>^0_|yBSGqwv;0gQyd7E(tEqU9Xb3EuFoN_ zU>K`*=k^r$&cR_Z#tG!u(5dnwJk{LpPMvUYaEQyur%0MFh@U<8^zz`4~#Xeh%-#EW!liL_I<&WqT=SxyjPM2*sME4**;0rNQ zpSyFj9@#x#o72}8YGV`kr7|%=JWXBim%3-b{*f3&dKM@bbbbXzVjM_L9GpXus1YB2 z=I!qna~j-gPcy0O>{~;otgcyaFCa-gh`8B#jGThWDTa;?eDVN^J%YG_ZC+WXP_Ku3 zVLR9F`Rxa7kZoUOP>J*O>pNw}ROnTEk?<28$e5`>wknU77OVk>ICY^X%~=iPpSqDf z5wsE1McQ~1K+VsurL8++c$;btO{EfLUbC{kD%0ACdR&roOyW3o(%~OS%+TJaRHsh4 z@az>;zKZzlk)|1+mGeBto9eBfpGnQaf;Y#D&a<5#Z^WxtHuSLt*$#NYO^dO6t8nYy z$jpHxKN>h|-+Yz*wYIPh^AoyPWA9J=+HaZ9`s~RQ%2xJ);=QYpLXQ{A)(2S__Q1Ol zgQ|}CGx%yPv8z67%DTqfh{44ptN8;sx+v_(y}#<-fe=U6eY;e&j*?{dBPPvky$f#p 
z^ZV^Yq21yqN1MN@*yr3FEgKy)jK`(6)VqR;J5v%R=I|h;Pw?SaNTu2wmI*b zv-40#U<0lMNt&7Z)QFT*klEX?g(vT+;Yb$V^!vz`#9oKZlXST7u0daFHh%M@slI7>+=b4pF8C>w>EC{w(YUL6AEkKlaKnA zkjj^L(_$zmQcRc+v8#^IV=~ujB-aU(R;EUZ5bKZ11OtmCvFnv_~TzbmzC7ajj>hdfQ&iYi8DdSs$pI zl=5C!&n!QTzH#5^iOA*?SDI-rlG_rWE&k!P@+Wt4Fn_fHp*v@Me3X!n6eJ@x{GKn4 zJj%>cxt4G^OM|ddoWM5uc+ZeVh^Z=ohUdiza*w#h%QB@5{T)NAK0Vr3q1 zn}~3;DoEZ0gka+oe?{+Injwbfk5hnj^Q!MFg=)U5IyW09~ zS|}d>*?r1_WxY~FE7MNqN9SuJwS5hqBxOqdImFtL{j;3@LTlHCdZqsS4W~0c_#@(f zgPT~1CG^tkKmDJ@I6XrABG1|Px1+o=bPI2NEHtTSdg1M@TS`2uTX1h_^lM*(;1y!* zJB_q>72Y@%UKIla8>c5?&U4lH#gVOaVtGeCp-jA7TpyP*vQ_$PdeLO(JQJ!X<^8OC z3pjasKU^`+zDY=s3zEHm|30>TAcC5PCjR`A+pF>M2YGpUD|XW18K`vkzv@6N(gnx9 z8!v^dU#;f*g#!lWk7l}r@(K%$=YAGd<}55c zwCc{0d5pu5(0Dw4-@LA_E( zt+9wY@WM+Jhz}n=Je7<;^zp#28Sf7tcoP<*Qsd)ywI(Zj$HcJhrl5E<+jA+#L1fX7 zzFsideY}H?WgaCmJFi#Qg_oFJjk;JKA&BzI$T<7FQ!BJyaDZ?z(p3$`xjm`zqrt1%;64X#ce}r?S<991;iM%lZ0JyRG**yP#%j zrUr`ac{I|so>f*#UPEuQK*wcnd6$rIL%OT9h>M2HZiBT@^bcqf&$sh#9#}O z7wxg&urSX%$x1g8nprY3`Lpq@EiL4i8YQ+4?A^Qfp`D$il~sDM0SXo5QS-I}bMN<_G4KEtzbBN&B0lPIE)xqPR ziG(dx6pAiiC*W?H#@9gQRMPi8;?AkP=v#c%dG2^hyM=|7Rhu?D6O(Mz6-=B_Iy=>dB zXlQ8YGUny!*{o|@cB+&#!a=%ulVO4#NTWr| zd>ys4_SX>+Jc$D(&Or>+d)wRE4$(KmoiO_zDde&!krEg8^uU3XxTGXcL{od3rcAxi zYvmMmH6^9MEP>Ofy(A?ieSGfM2x{PdZrAqkT*0wq6T>W!A=?`p!X36R7jMhU$9LzN z()?N;|B+B#E&a84>{SVYwY8OCY2miDKA)_bAWKiLs-eNOvAKDezPTsg(!BYdjI=ZZ zJG&ArUvN7~ZEHY2^mH?vYR`4%b9yI|)a1{eJ^N~r{^dIMQ~`3QQusHQv>5dkSZn3n z-w*rww&;cshUgZJ#IO^I#G7z@;Y-kdnugA#>88NR_{u?sp%+eVwB$P@iIwb}Xm4cjvTn-kyP zP#imUtgEZ*-uU-7IrR~(xLnglStX@|IyyQF_(fy4jj@Qxi>86;1qwBt@(pX2inc46 zT5g&j^={n1e^Qv07VgmS@UVjoE-N<|3XD2mOed^G&L*mX=v?hXUpdWe@OYQCwe_bb z%kR_i)tilndpUNRLhWR9-X}CcRD!f^pRQM=W?SX>zbwJayDuy(?B=aolGfH| zD=RCrYzHLD?2fas1yFKpU^-cRzTDF@N>6uaY<39eEJVXW_hS}(7axCP{xU-fG2{s8 zN7NNhT@F!F?0amC+mh4KB(;qnPr zHm{sKxra4?l1DjK8V=Ov`n>Iu{N)sUVgbtjO)t)9v23l!a$#vn^2rkspM6wx?82Xd zCa1n$s3u>lW?K7H*_;5bWKsN=`2gm3pUXB4ZAhM3S#c=coJtcVg-nl*jwUW+3#XP*5El{BdPK+R3RvzQU)S4q;+qQaQb}WT&O8Th-dynkf1`WuUH(YS*q^ 
zUAd-o{qF1fdPR0JH$z*IwZrQxD=&01WDN{ro4efH-5<^U6loT;fa-;Cz(qUU*5nI2 zOnbm^p3W_4ZTr*627hcgH{Wf;a6kN)8SV=wP4w`t+SP z>^3#yRqjFGxQgDsdg0>5#m(u=Y4?{AMgkHFll6-k85uHn?=q_0xid61m8u!eP(-fc zx_Wx)txT|`K9!Wz`zoiC1hNS`P5*)aLdNi1UkQFRR5|$d>-!I34#eCyH60nM^8S#K zQPW@M>e%8u-ygs&7Z{h5b2#Fhe&v+`w*>p5e1??GT_^bYX}sw;WaZ`e!=I#QW21$` z(d`(ii%Et~R$X0vZ*jP$lG!YUNbfh$)5Ch^%o(kG3r6zyuA)Pp5EDDr+}teLUpRn1 zcK7aGt#UV!Z{NPDsH?viAQ9I^$6Oyj{#;v2SqD>Al{cLyihc6FiHYZk&Fnas3 zPN7XqgU9LhxjqHsT7OPgS64en$0R)dahZ2%O>J$(Cr_$8e0V-Z zozK_DC$6VQhqJG@HwrOHxT~poBxJ?oLekW{+ac3rKwQC5QOB%*ep1yS-K@!MxA+y~ z+|TLk?5vieE^Ie?Q>Vl!L+Jpsph05Fo#a=5B503Zcmb_#Wmr*JnFrv=?t2RrZW|jL zqa!0y)Si`t6bb<;X=w>?&sfjjL+quZYRxgJcVJiN{f}i#swMZq!y{?!7UoTHHxir8 zV4M6%EKmsPL61#-C5HJ5^6+>=SNhu57js%M^18XXxv*8&d9KZ0H=^OZNK5CT2`;`t zK|EaVj$VAAA}dQtFTaj!BRZ2ikKNcU>V5l8gx!+KEM21RD|gq`aI^cT>g2`iq#r%%7{RF2+mnKt#$jui%|A}uG!EhLmp2HNEP{r#i{;^B11+0w<@W07Lc zHrPFj3Yply;qE4bG!uFcjO^LT)F|_c-_17mu3O9Vzu%#Jsudj?{P&OP5exD(e?d-v z@vvg|7Nd!V7#1YDbOGIyXWmupm?rc5f;w6`*I~QySiTOer{peA$!AMTOXUYAQr1hY zIo@BmRGw^&7M~^5|NeVjf;IMzNiv3poD9@5DDOo*3mY3*1qB6^cXI+6`>jl>-hc3* zvavBUv+9?ldpBC)yI?_wC#TuB28Oh>w2@aIam~!k90Dw>TWIt3pmgfCaa|zi)vGKP z`K(ZJ6(Y{b*xO%3#JzjBo7!{tjT<)*C-fe$Gcumy$3^3lmI@0BxFjUlKYskElGeWO z;KAA?g@_j`fb-II3lG}Z*Z>NvOxMm;oWeqdF#6S!^w7zPjFy?s>b!tDJ(+%lnb{j4 zW4b{ZE1VVb+hfID?-!!1wv!@zB~mtPdN6>p9=p04Z{ruqB_8&Q*FiQo8~gI|^WQK? zIgdp7iJ8@nPbd_qWwUPead%a0#2=H_Rh`tAA)ShD;9mOINd-S_%`Z3>U%q{-G{w4m0al0cfb;TY4JVS-&rkHG4bc_Mz?=fW9opVl*21C7 z`dwfRy>2B*%E;_3-c{+o*Eh&dUk|IY2UP|sj2b8U_~Xfd&=7SeK$J* zzPSX2c0tqaKYPplDZ0S0ijwu>g9Arl2$Q^(^vcf8?uChvkPyS+!(63ww6sZ2mdE$c zFE1^@Rr$gsav&U|DV+9x6b7kGJtc<=aaN-``wJ zJ|=GH0%(gvJO(v~|5Msru<~@ndD+?Y2K-^S%2s%=Kb56j>c5{M<#!r*L_{!utFONy zef9Ce3Ao<-5odXDTH9(H9?zevXluVrOg!1v(ZMAkaA0d~w)G5oihv7>!Hg5rjXH^r z4i3VfVv7hoa>{wizHQr6$Lsp<&8=m-Z#lhw{W>l&aZicU?1dt?Y$DzM{m*0*D!zvD zghWO8!8qi+buvt7yc&LUBEt7i)$Z7O5z16vUJmn6bcNs(Kn31vDbgk;Nm<{UJf;9V zyyzSo8_Oyv@JdcjZV

C%Uk6rq%DXr{2boR=;!LcMZvbj-=m--|HI*NGXkTR!H8 zvOx#PdU(<>4+9W*y!_)zNK6bZV&QKfXAt}Gz-o|(2L7Kbxn;btZsgOe)IYOMwvB0c0QJXCTuptk^k`NQzXz5lFJi~oh2M5GhNx0%T%!^ zHPE`c$BI6EYJy|s<3oNEB^|qBl^1R6nS|`@5ZFA>G^ADrD<~|it;1qa0%(y9|H)~#DD+B}yphiiP)ONr1_+EyDGIK#{96E28SLK5-ZeMR3|L)WFHr#mx~0Vi>By#>C<gyyEf3Xfka zzgi!x$Tn8X-S6MNP(Y~vD#-YUhflYiyp}&VT;nI~IHjJGo14HXOTNz8vgHP` z(YZM#zA#_ez~l0kogc&ckUxI>$a}nKWcTpl2`GV&9zAk^lON;gACni{i!p-bK_ank zZf-Z@RaCUJB5Qhh?&7qzTt^Z;)!o8-B>Y48P)Z~i%vpua4w(zHT3A|MOyF@jEy1-s z5s^KMB7dbFdM3L({=`&G)E~@1hOGBjQAfz_sx*P{TjK z9;|C0uf2A5F)Sj2`@{*vt)#;+XU*hZjdlKuLKHoa5L3RlyV}D@X%%HANkve5_dfnrK* zUN++(gxTsHxCc`goJN{@z}}R!tS?6Df^-wzVG-aCkeR9Qv$~X?l7>sOoVB&dp`P{6Ogf7QkJ{Vy+oGWH;Ft$Qdbr}S&MOi7%w7HIhzK}IZ# z$nqVl?f)55*$vD$8K|sXyYRSWn6N$|2WxI+b-=*Kun7<)j7ol)<(t$=Nl8HP$hx|g z{5zT|6I?S|o&Q;M1eX0eAaQmi+8?+b8gm#Y%)g^EOl$kU!n&K8nVE9Z(tBis*aLv)B-ee|$Owd>px_~TYHBc7 zNblj21gdPND-l}%e;`Z84v~k0h_(5FAi%9~pwe~nza=^tLn(Rr`SbPTq7FB;!lR;S zGBo@E=0^=&MIK$!8yc{hVdLfW}b&ADEOy=4p(`h~VAw*8#__3LNClbj4^LkgAYoU@v(H5PM(Xs*`It5?-R@Ba%~%MfEY(ZUvOyhDUU`A{>MS}M1I zrwvw1NemBX(*C8k=)O=vCG0e#HAHxIN?|hKO8@y1Hhs?*FPgkL&}McLA|u9D#QLM8 zkHu@qkj2G$r44=Be+RP~TfpPp{8x7L#k$=Zhf(5qgx0p*aH!GLwwv8Hu(srG>*Le$ zQD4E2=`0u5J5V)59}YeZu6xxhxKo`Psmu7!LXGq~r4atPySqCvlmnW6-Ny37(ENN5 z0PSdT*N#CKHM&BEG|)#44jTlqUZQ4WWBc@I#A}8Hj_0{jr-u)(8yhFcucFG`j#_r4 z0}gu@1>Ou7v7>&t)KqF{Rgo7=d>@+QV1D!22UHE>X-kV5l(6Pe(X(SfVfgs_zkpwo zjK8j@7u_(+i8yamy>DohMzHiLXbC?{pWJaOj@(Pb6bF!6Gt)rE(vlxYpxeb@C<2P} zpV-dRV_{s*ZU=sl_h>RHAmmPYB+$PJz^X%mWC{n)B0eQW-uBpqwsK(XU=FP4?9}8T zg^tc4t68E@YK!$R$uRRZD-x;+;_0oVmM1^x2JZjvXgCK^p0giOxWGmv={UdU$2Q&U z>CD66OPDOBqo$TLF*)_>)hiWUUB9fX^E17L%9+QZ{w4qcIBjFyz3qOCKy>N`l zmkd;r+7O@$Gc&WM5?Yy5%xE=cdHnWJ3$X#0JZ8#_a-F^O_{o#_($eG7FFdH|rt($E zOe8udD5<69Q*kjD7uRlbK>@7WoQXmz#)!5jUbY%2XFGN36z`cc6(99W6u()XF8*hrNAM48rBY-!)E6!odZuYDMwUk%$X7tq=G#G5}F=K`@m|LU0&VGV`&zv<2F znH_qw;{PnYDYQdxmOAdxo9pl+6YP?kw{Lp_)-uo)Wn^Zi$*lkO?VhnQ1@KheaFA+h zYK8!8UIl1rWMs6WBq@LNvK0#;p23lkggcq1z8vK+POL@Qkd7V0h%77k@4)Z4i)dIR 
zpc29zWjg+eM7_8Yr(nO%Q9B-?zHK)`@f5~LKmja@7cV2n0Y;ZfEM!f!otve8! zH9T0i=TPW8_&2wFDNjl*d^RP ztk+X$XlSrpu{kcWF?xi2J*DEc!OtkDDr;)?qA-L@PqF0-M>pFeU`ibXB+Ps6Ty3%A z^m*If!`IJZj7J1b(em1$gpdhra`^|w zuo~3gtovUNYd`s~#A<`+_#6_kxN(XJI3Us6SP3>D>U=ANjhFAt;pKnLt^HxOE@mtb zG0m5)v+t#(WOc{O7#nlT$jZI~i8X)&84IFl@^akQGS~GMkW0Vx_s8n#>z|R3Fqq)< zlSYbvu!WLBSs#c&Dc`0!!gt5+k{`4~a%QL(np2%ejry^UBQf{^m^=H`tzYOBr? zKDn(iV$Q(D1YEZH$(x2r9PYHpUuh7V4=yPY13r~i#O`E2>=e| zbfWTG;g>MT$OE65sFpGrrfCr0TWAaGeGH|icRYYaNYd%&`>8pPt1lDlC@F>45@r(t-ZZnRb8EdmgW3Y&}qrTo+w@M>s-uEL^OQ4+!EnjvhTKI_BWKwWGVw zXeXWEYO%;?5i;6SVAcIRGBVNt`DjTYll0{LKe$reUv{3}-p0e#FT3*0L$$}UCj64d zBZ?lQFZ>2o5411;7YbG0(!!z=WXy*^rr7A~G-@diZ+!jT_O%+bcA;HV`ZZA|||(#}V(xjp{)F%KBP7n1Rl)8}IqpwF3^oBx_VFYo}q z>^^Ghsp<1wM;`xKPk+Z%y_mdvih+QTOX{0wUjy2oyK`%HJPoYPiKEy}lOXAFmV{wZvISXes1 zso>VQ2h@O%kI!{o-6#WbkYuD_j2dr<;V&VU$C8W)t{Y1xT|GVTpgU`kKsHiIQRn65 z4QBIb!~6XZOj*3*ycNcWYQX=wd9mXEr)gN$|Nqdiahjcf{1GwOuWmgPI4u&)5^yfq zuRa=nk%e}0x(qywz!nor(TJ@TT0Q_*)SIckVOQ(@XQLi?hqdm?dRFUyRLTp+Rx%uo zjy`Uvt*Ceapy$5%|DyGKn7q|NYyOeWe+lsO^2YJJJ(+5WzB4la7AYd~2B=Efk09g7 zXK363u`+80Zd#%W?}zvAWi?7+GEY=S64W2#K;*S^a_aa^-fVAg-w*4&gw9@^Zvz?_ zF$w!ewEk@KZ?Ya67A6DC90)v4vpPC?X5b@%!qN)e%Hf;H-}Z&q`WRFrMuZUc>Xjsj z+Dt7IKceKoF!9RK`PD^Y?YO3M?ZE3<&D8C=8RVa%)kryX%kK}|9DEt@57?(QJ~5G8 z*#EzqjGg~~BVz*yruuxa2oXj$Ks`saqW^}PeXB$9SXauyK~S*B3U7)Utw-+qe}@ZLFS+<97+J?aHl7 zqEB@{_-3S>y6u+yY6QcO|4prrS+eafJ)^hTzhOGp zc)@8F1^Cuu`b{s!ZUsp6n?Eue9_@JwGvLT_BeD)qeaYR3qGb%wx&TUV0Obknh>6{X z5#iY3!@Gey2<1`c0+$Z~HrbjyuMin~xrIk!)Rq-eNq^lt_Mt@2%uGQbEs~c{Ww!n+ zVk^nXxd1lcCnD(?8AHE*rQ|BzzD;XkVWG%}`&R&|A7f>0y$hj}p3Hd|sz$;l(_ny4C}E)`P;)Rr+_S%zfdHYkYhXJ4O5 zgkUm;k$UfgA8$Q@{@}iN@d&8JqUq_Q*wELoac|a`&aLs^E&sXr$J{UOAv%Ag$bIXw zh^S}_sHq?F@?Hb%)4e4xPd~@~($6mez$37B3D6p2g;@jo#R-JX)dVT;gQ|}n3Eh5k z!E?M3o9T@FM(+C)bl2q5w_%d9d-N#YeQOP@Fz_Z?_G^)VN8aC&1$fon)dgtUdlJOz zww@k7F`($}mM|;Lat`P0iwmL%Yj>9RQv_F;~gTN+1LLAcC2o{ek|XqH^dgsyrIKy(m{~ zKN0wcp3Ke3;XQHUDNL-HDCNb)MQLeai9kiY zUr##A$@vUmlzft!D)27t9UW75&1MwR)Hg&#L`DHV&dtpU8&>WCdsgdKi1*jOb{ 
zEcaZWsEAbCNCLqI=R9vfmb16;W`EOWx1Kd|kmRNOy(QJf$%W&Cxe*$ng9;@F{V9O1#q(FJd z)JRLZ;XxT9n-C9oyxXzf92tV&e&yTv6Y8Q{3ULQG`HM#}NcVkWkIpvr(MTij+AOeZ zg5_ex@#@Z~14Qoy5Z(R7{cQS*J+JgXrm&xAnEeN0dGjI*L)3X+bs))3XJ-7f8&)#{}I1tBTRDp(QaWn{9EKe@0%7!;>kC*{{Js zvuDqq>nZ9+gP)(38DL=C0;7@*@N3hK>xnyt&7`_%a3WOYnYT8qskY01-=o-JJR>MA}bb}|D7Eg=H$D}xsGz7QZDjPWc?Dh>spG(a1a(ScdqK*t!`fym)@y^1ckyyLoNNg) z-2j?0&C->FDTAo<2PcXSdHrPWe0&(s*Wr zX991S(ufnUh#o;%Sn!cm6d3Bjesl!*b^u=`p4|8S#b>P!oV5br2Y4blJiMcKF;bu8 zfAr#c@R~@ZVPED}<5vTj$^d~YL0bv}v+MP=cCyz&T#eb-U%RK3xRF`{ZYl5+`onCQ z0M;NNPl0Z!Bm@8Qg8@Pve*f;s{TM+vKY-%^J*;x8qeG**uC5CHAeap*Dk@;iex+3O zUs0(B0nr2whL=ZpD#5`3q_Zf8K@ht*-*(~mrm;uxGg+TEKyR3k0WPSivdC@LG?F}c zB3a-7gBO^aJP-Z{I93oWheHXQsj|PH9jb24+e?-R1Qbc&v@63nV+e;B8BK)IcgPYR zR1KGU()I+B1~nJNCguf*8)#VY{R!w7Gr-VLS!^`Y+PYXz&(3}j>>hB;3}JwS4-zf( zzUGEM1Xc>q+}w+x;$@pPbCr>*zr_rAfgnONd)dL&r)}*NBere(zccW`=PsAq)y;!N z3(&)9u)hfw6W$~wBv1gkaC+hh*_(Q?5F?O{VdTD4{uGbDX+5B}__Z7N)zI2HATaP? z=<-)sqGw>A90D!33fz&4;990OS6ftARVH)r|KQ#AYE}zz1X^GmzyMgBr3g11WDxL~ zeX8j(oxyW54unp#h=FxwRUu69^8d_rEP-Wd4JGh1V!O7HkB0Yv8wif+V_?KM!2}Cy z4U_zyo}M1WOC-5 z70>tj`1w^~N!zB~v&R1790y|a9+L>`A;H0jW5+z0jac%)yFEHSKAe_WE(xZLSD~S_ z2IY>>|G)vMuC6{o5PWAv!AxLYS8RUzTr_PM^b;)q3<|_U;`Wnt z6wV;?xX5zvA<)1Mg*4Mis`W-h;}ge80K&7t2jAA&DV3pN+@5+@@@*IX8ygNhBj2$| z4)9-ma$WzZLS7d~rLJomovRX-0kYi1Kg3;D_Cc|gZ4IC&Z?#-$lEW2=qUu`a^AZwj zv!O?@ZN!YwHay%+>a8**yNO1?Ak8=oW@WmQ4&;%GI(0!vq7Td}a0j}{TTV^w@YK{) zFu0Unzh;1uL-*z};AY59Cs=etZLoNP#sr(v#%|lohYguHJblt>+ofkZf)8co233v* zN@eubN0M5=?;ZncwP5mWq8$>;(sGQz&jhC^^W#TZ@CkuMJ2*Br5EdvdJ^cXKfe|Ol z+`o2{9M|!ul1Za;IG5+qBi?NjDElqc+yr*6+V1}lkxpa zut!>wv1q>1fkImj5iv1Mj7DN;xVLkOhim_Xfz?> z%yO<6!FY~%7Z^$3fFMr&ULGcFG$Be4phXgRph8kpQ(u^jH((!ru){`w-H}@ zdfpPZaHNvnMPxS;>@A7vd}nxgIdCX^23@;dUL&Y>fSa+Bq-!ACJ(!DdV~g zC5%U+sBR!R8x1ha-}crm--icIa=pD`%YN$g>02FVBYrmW)UG&?-m(saE+hH*{lLEa z(#LGJCqEC$HWwE)9bGaE#$To|DFkd$onSUGca&Iv+%6F5C4tg@M;dc0KO}~JStIC7 zDzof72l5RUf-Lcqy%g=RotCBMZ;^8_hF@Qi8SKlyM(e=e^$1Qb9yn+bk)x)Kv6nu@ 
zwqZ_JPhox~MZD-tkPVQOlcTcv`H54k7WynrCfo7jO1K$T%w4sxO!yfv05N{A<+ZlAvw|dE^pz46hbKhrW(jufi5OJF0Z+my2v7c8Lo*3C;Uf=~&Jq0A^5+C$wX1P5o13>QC&waD^DG^s)s{!F`<^wLnjAtNEquDJ9KS#Wqse? z^^~;TNRqyBV;2}l0+HK8bUrU%hOhyjqOPf#kdZ+P1cao%K1X5^klnAt!-s*OFtfIn zdGO%93@Mb~X*>7`$G!f=!Yfl(3NTPo8t@IV;>X~`1MODp&fNQ-POI+d6Cig3(gOZLO4$X$e5Q{442>RH~-qrHqxVRJQ8X8<@&g=&x&psNO zi}=^h|0}Z70p)+lPDLO)4ViX72SXMZ0l$5_1<`>x6b=m%12_gf7fuoW&Bt{wsL$2r z`@fm(SLP-~=ZuznkJ}Xrg03`<(@1M~?JEjARa-bcs}W-|-;+-RdfMOsX1fmag-;Be zhMk!xIdBve*bdmJ>0MvnDhw?m7^e`@Sh5%Q`UlaS=#vmx)HWg&z-hSlcmV-#uao0& zKPhn0nTYtDiC*R3-t5HH!uNPIdJ^;V%3of9FZ3~{IyY;;fQX#|sVyaRd}@kp`vj?b zA2|qsLle$}p(k{+R?i4*QyduAVsr|jM18|(W4~Wo6~=u_#>}Pzt+u?din8}5K6VTOS1r^8O72F0buq2Dbt@*0@dK$3Q zf;0P{!T_BbMIa<_IbsV_)}J}UDE=8nW)<+YKYfZoT;DOY-7e?)dc3wkP@zUsPfwuq z96!GYcnZn%N06;^oh=|RaQ5U>2;Zsw4s!?@i_cD`zJE{oS-FvF@|Fe3B;7rHl0#t@ z1&GngE$NFjHX9Ge_td(r9cXDoaJo9W=V9`9P~&B|31|oA8pv9cv@f7jh({&duD-r? zijSXP=>C^Oi1YXp)liPehS<)J`cjID{{0cs_5cY+YfT$|pWw?Kz>&Sdf;Tz$!H-J7 zafX(bew`>x-{|76xkb6a^5ZD`Ad=lWo!S->Fs2c~G=&gRgjN1daJ%oXzBTGr4tVnd z6UKRM0Oat;_=WiIOc!cnnJZPn$@Tv|5h3#Ke?1X#)ct=;B4RZ*+Uj2e5%LjhKnM?x z1R-SsV?#)1VX=;brqJua@vDbmN8o{L*|qI(J#;JW==7QqmOJ91S*i z>VpSQL#_c>adll?{zS-u96^-f8mL%riSyn}B{on!asndlAeoh`4LD}98l=9hlMzn- zI(E*^Jn);4z)8%v?3}88xHv5HR@9LX#N_5+PAT3~r(^^hK=FsUfg3@|p%;e5D#MTs zT)w}=q1NCaiA^7v8flpT!$n8A)2_;VP|FAWZV;Y?L7H*@p1FinzKDk9jZnLfI)Uz05hFYNGA7u65*i}SfEqDhx!hvU2<9mav+KmH+*6s z?`8(kHYRHnPxgp60M)w8vd!p#a-(Ns3li_fjn8nvuK6!~ z209mPg(uD~4$gT-~g%w=e1Wn2acnpl@`pXo^VMW6#uMqJ5f<$}= zY-fECkMtS+AqZga#trGjb_*f!!Tio4zN(Sy(&|4ZV}uPLA0A z$*puw&Flv|v?Mec;!*_e9+>jo*&6PQJYgAtiw3S@IHRzCs-sE+OSW2)102ww+B)OU zuYG^azGe%wU$yqG&p_WS)E=|F2h@lA;cCG$bS)9g1>^`bIpGULq08!vn&nXxaSV0d zsw<1s*dOC9bGHD4ARVkqtvdK|A7A8$$oJ1_-qNt}hQIxEs%SLrb!Mc`>$hR=Q>+Km z9%O12d?+$Zxfe2~{PO$WBexlDsqMd`uBLw3&}(bmdp?@w!FV*q#eGwRqH`bpL>fgJ z?UD7`6{Ezbwr~0Zc6X&lOuj&FL1@yCJ zOlC^`pNKkZxcAg8KY*B^N4Gh%&?Nyv1c2J%%<=$oFjN=}Y4f1R-PR&|Q=mFn6hm{- zerE@o7T5fC6hQazRW^k2dK4Js0n%o}j@mrvdG`ntlUG(&mWyueAFVuVQtx*Per0}u 
z1I(pn&F}V-(F$iqTpR*;7f;xzE-RDIz#uklqM@SlRKk7pb4LdgnSiq!^PRb#xJ1rl z12&0MOBsYf91sidU|v8#P8!Ao&TYGoWdWSR!s;DMcEb@m<=}4$2?;T4N!$YY-FKoA+4<@g?2d98xC3l2+s~>MKMP9OG3gkcrLWanQ9O0!1`G$jA0D@;N^i(nxLv~EkFbN**E5%;h520giAYpI}ccyvn zma7vWu9F36yvHssMo_}Z`3@L|{GicHhB0z*1VJ27ONVG5uPVhsGMep8QecL6a*2p& zM`Gst%glQ6Qf74TDOzI?kbC1N6LlaRRzcEY;ucfNCTPDNK3Q%Xc4b|o=>tafQQYV= zY9o+69c|ljc4W&j5gOmg_)P*r;gJF<<|`zJAf))h98O6x7A9DYiHY@l?)weRLI5gLP4ONLdC>P#)lR9lM`W6Bw@3UT06dmc+btjq2?y-7VhLz) zXBeo9-8OR4ANN6Ep;Z~lC}9if1u}pie{;(HX>x+g@^@$m!Dt3v{+;c z7DB#{}Uwj$fxW00V${DkV7>VObbc3L1U*;4 ztK9@b@2UEZ3rU2RJ}{h3)-aj? z&Y-texSa~+s=iNChrCtXH&=un8*vK?(g8y~*_IM|{zqe@vTZrg{?sJr$);TZukoPT zo>Gqf@|6T2Smx%aE2@AZJVuj#ycYsQdZkkKaBWMF><44=ZcxX`;YgUb zVWv>!Apt25mupE)K2OIPvw;aunG} z;P{b*upl|U44}2+DkghvwmX*x)|s4zClCDKss6Ys|KaTi9V87+-w*lr0;(rZRN$pNEApk^_5+7gtVHLY<%{fm|(>qFz~F zuV5MrkHr`Pui&L`NHApsM1>%lYu#lCL z(*nvkxdz)>5c0w5_7Y%DMQ?A3>{yiLHkh;y1JMF$I5_0qGuvZ%nUHD%V6gyb%^0RN z7NCCmBOVGlb`$M-m3^~Ub$yF!m94+(BQ13QsJZM9DhuC67`W_*t05~J>_otUlSw2v23H>hCu&jRnzrI3+o@<<03Z z9Ju%(mi1`x^De}C`9re-U)8YWWFCRDXR`tFf{wjUd2VN!1aLNeqGZP{P{Y@OFDC2Q zLw1jo)Lz2*1yei~RQO5`%=R` zkOWQyRq|6#$dMIlTG~@UL_l^r4w5XB;y&m3_90f3pr3}>6P&)TQzu=1$c;nQwEdcOP@)y zRt`B`76DDAjd4~|?aeWG98SB%C4kuKwkmE_C!-)Q&kp4`2FdMAl-UyliGO z-*zAtg7lCL{8epcMF`jFZ~NWn?KEBIeLUp=j(D69?g zBr`J_7WB>Zd3Le3T8t~=IYhYJ=Fkf#O2-?@GKG9n*ilkfvOesqkluG%(ik{H$r(~`4hC5S5<9}b_OjXNV``swehbXEU?elNo<7&j|m znTPG8hTnsSm8emq)pnR&>^NF>StNd)iP6QjKn5!)&^$pyv(jz$jEQ@cYUI$Nr*kPO zEb2PG=m^1tKC$_I<{CCOj{yLk|B!xQS6xumM{C`(v5cQT2T99~Eb1CwHpp#6)c6mR zh`y0z?BLdr_vDk|J(LVg!!KVmXqA-E{Q4#v(6kt;d_FGg5QXi4Y;q&gJLk~|YmMnG zLd`d45P{v%TeL*Ez!T?rsLm$emcrFXG|z6$!;|`6-9?^(=lJz2L=1oB*E~Pian_+b zU%_lZRzd=9#B~=@#J-40WXLNn%P02ve;%{qpY82e8p_0Uy|ewJ*|BD(TgP+_b6UQ! 
zYB&YlGcl#&KgO@6!v_xtxfic~J!Q}9@Fk8W^VzvT8vu9jT9Y5;$R;$WA_hqF|wD+;S zefK{YdlPV|_cm^P)G3vOR7551$&ye}QB+!pXhAfVN=Ootbt)7gZ73}$`;wHsn35$C z$ySIYd&n}D88h?$e5-Sw^ZeiEy{`AV&ULPH$};nt?{eRt?Ou}R?=SqZK5u-*ecqW{ zi#|kdls~_Af!pZLhC+XvIpM1rp;FBm2iF`!7T7a2u6M_e&tu15Q55?=30=!|ElCK! zUCTG`6}u~Tpr^BPV|%{1(X8#2oG~Th>Fn@esp%+O;#Q>;+>M?amo+6~(yHuLa)Lqg6?D%HEb+XjtbgYm z>ytd?0@h(z;6ndfI%C`9}H*Ym$%fP?U!{9=lmfNiSR&ndh|k z(9F8o!u#Dvq$!I>b4NccmEkw6iiv!dG*bq_Hy!2{_B|cpJQR^8SH51vzGY`S94|iH zz?>{7BSG0y5w2O;Rh8v5yy{fg2F;Rw`s1-5hMzL5mnV9f&)Kuwh(0)D-Q}6vTDXfo zgl%zHF|{dY3Raf==)1YMLQ=9^s=kl>QRv8z{FG-4vAAySkMi>;&D*nlkL=-ZwYx7! zS%+x1?t7p+Mwk0u^EtzMicj#|tL68FV&+<&==O-O@s#Af@GVIi!erB)EC6aY=-^;Kl*w`qSX60lbyND#1lO44D$BBI7jJ@W&hoDD;Ym~XcehTnHRD% zuaVQg>G?;!>DMO}(o5BL>mD4qt8iw+#@{@gy+X|a$JyD4h6A`k~-Z@Wo&$*iybi?Ep&!~Dd zcvg3ZPk9a2X2?B0?fpXcnGE6g^k**@T<9wiQZB8FVxKZfyP)kqZ>PoAVa-i{ha8@{ zF}k>X&)51OpSV@B>s;F|?M&Xr30j`zlyL5%F+96R! zW{x*xytXM{uu58{@@+A%c5y&u>6_lJ*hKK zFS&JcAfNB4Ri%0s!&^~|Z2m?5{$4C!p{GM5myZ~TF4EDeE}{RZP}^p$6c!*h<@n>B z`Sdyp2fXR(|8>TCP5ypzV%bk7XW*kd>6 z(aX+R@BIDkGvh>^`T8Gy)jgg(c{TkU|H`QXJ1x}Ih9}xYHm5qL>|lZyz5hC2RIqsQmWAC7nFDT)Y1hwr zRxWAO+N+V-pQn*2;rS~0vF1RrxLx%sRsBDo;yXZCEwdK{c!tVG4qaQAKg?%G52V+o zTGWuyiMOs%lI@^Un!v|rf4a|j^E`>5i-m=qVYipLx#&0i>+spOeOM#-$m~ni z)={I~J`Ke|{&FX$tg`K6Q{#E0N*6au4KD5;sM5&_x#O#L_(@W~Zg;_(qiRKzS9vyv zhs)l+@?5i~W&Y~dO6Q;O|K8_GW(XC-k0V#LV*{P)xGjNFyEn_*o z4ZPf?np?%R0~fve?T`_*$r^52G)T+HU=Yr1%75|>lUiw=Y$-s@ao)7 z;2_DNq2e@?|H7*>H}-`p+0;hBj+fuTJk4eG?oat+oBz>bIvlkFR+b4iyWdj!#A-Sq znrYLg$7c@qRDo#umDudsM34eZ#vs>K31#IudaHc)1pIW~ zK0Y-^#{aqc6$HEv8~uRg7E~KC_Qjy3w!mx6@6f)to>=AMXQg0AEqU>U-z&N?HBDs~nK*Z{bA-X>8i2VV*Y=Xq3dYy9SpO54P7b+>= zpdd|{i=<`v;fdD$pV(59JD?&$_Tk-~8T`JRc%s`<;2{6Qt=W(EnSiX+-vt2J6e3;{Lc9XcFIzM)G1#J^Yh8@iSM%C}*N{x4qIrAZm^$`eDBOU}gHRGNG@WmV zuU%X`QAA>K6Q_N#vwKYuI}R#nL;>B|;O2p(7Kh*Fi~|+>is2o53%JH{kwx2Re{(o&982Bz?cKW+{i2k#boE_3JkB|D z=0t_sp>rXU>--KdJ$eAyj)&3#q0N{%GYYs~I802P@UB*uA|kcm8`ACIK7%Pudl&s5 zjBA~ckc?59?;_fSeeU_hJ&607Y$AoPSx$)Pun!lF$)SIqi?oTceS6d 
zFJLguTPw6-O@=749+{m<1Cr_NX?VdvVrPU0El^g6YzM(5Qu8HloM)9lbM~vrNoDCk z3jFCF9KA)ImfyteR+!)Zm3` z;u{{u?S&SAWxTn_h_weFBNj}9mz`w$VK;5V9~=)&JTA>?Y&4!TcWxw>Tz;}Jz~9A7 zpc9-#!Kdm447@r^%i(QlsRo<~Do)+Yv=WU(ln3tZN#cNpCqT1b1p(Vs^@}Tz25D+# zH5~z6GbL1w7dy=#yO@}^4!N86I;$1^@p31fBNCDSpiW8P5;9fsM!VfdMn{XGw-Q9!B4n!& zGn-+ZxsJnqD$2hzX`&knf!A3D1_qrEA6K?ZX1YVVLm&z)e%H*%UFkvh$K*_MmNlwG zvJ;t&zL%q18+aHJw3-H&q@<+bkJ<~>{u|rsj@}!Xl5@hEUHFaGe%BI~TrY&33JSqT zXnrQ(@L?r^)B#CL7VH)bPF*{-VJsn`f8fu}RuwxWa~wc|==xK4%H_h0z&NVVjoZ4#SmhTZ2Lv&}&q~*(yC>spCn>#f z+|0|nus0p%Vmv5>y&?s4762e0Wavx=aI)k4`K+fuV2JX@D<|3#D9Q@KIQ)FKCZ+Rx ze^(b-x#Ze~hdTeBc$&1d^oMkt!(ah-n6L|Axe}BWo24kc5L~b zYSso5w~80axciY@2T&|ud|p&NvQ+Ahy75CEsewxp2Y@#$TM>vmQg_A4Ms70V_5ri2 zi5#Az92{0eb&RmfKoIqHS#RG+9Yc^{3af?%>c@Yj{+!TC6I8faMdrJG6#xw z1qvuP1Ys#7v0U{6vX~bvSOAf)+$Podky{F$f3mAU*{(T{#Lj@6_y@fD!vz4Qn(LZaQ|)c6 zEPOz7Vh$cU^aue&XB-^7ko_5xVw^o2e?>B+V{+jf0WnKHDIQ8>B))r&D-93b$)uRo z4t#tkG-My*_Ae#%C3e+R3OSAAQUF>4Y`g;6@xgI$McyvJ_65PRqW&$B8a3;u=bTof zkbK+~+F{;6U|&P}0&+6{Fx6vulz$EV=~LaYqythgv-u%??QvF?D21eLr`go)OVI*& z|B}P(S(gW%PNqY>MHX)7ZG#}g<*kV}?Vs}Y7RtqXnz!;&R9Qb19J{NhqeKOyU*odrphq7qisa|k zRdY5RcQ5_S@wJuszKH6}An4TzsRwIht}WVf6+8T~f&?yXa(aTD1-;<-5peKz5UTD? 
z;F0m)j_uP01T=Te!4Ajvf^8vA4>UtlC`1xZ zcpsctcrU&Xs*{pl+AhfhrfZ)Os1R47rSPeMuSO|yhxN_`E}mABN7TuN#mkm$kK=-& z_ilZAS$qlTjSkM}q*HB#D`vM6S-7D97l1v9|@P&j`fVTISh!0QT zS!?3nvn=WfQ$nt@GrG`<5$`6&6a}j&)Okn&go;sS&E+e(-0lQ6C|i=^`<3h_$vWjEp1susO&nr}g2d(SVR?k|r*|+1N+bjyAGiJbZ9=Tb6kWTG( zolsF$o?c2DC=?+zn~{+iNj4%`?x5)A?i-Xo`cf20OlV?6LJ;T+1EWy8{kd~HajEA9 zSj2N~b<-?*g%>T7HO$E12k{tzqU7C;=WIm5U4Ox;o(m1o2tAd#TSksyL-WWKFs&bf zeK(t-P@4y!KP7iWjHs5%9&8e!{45aGSu8EBb;#XGJApeiG{hs?%nhsO#$2=Otx+{^ zkcjW~GGKon`Lz#bU?0G#a1DHPeVB(LCbSa!K--@ciX8yN!WXY`*uL5H|^pR-Y;n^{_3f!O@c z@(enM$p--iczhlTg49rDE>==HcxNTT40X0J<{)*jWW4T5jwt;g6k)G{G?I7|e8l?= z4B{LjAWw5f={x`BlxWGcAW?^c!|5Jc%aIXrie=Wvh>a;bm{&cLn$i8jBhC7SN9a}R zX?8$*KL{LqMt6-E^={1v(FXDeoN{S5OAXkILWuvc_G#DsG(xx{586?7X-&>~ptW4K zGqHUjV-`UiHLU!#uEQrQy&j?7>1tHyYHhV>@2(DlFarr{yf`v$=W5}~=7T2AGD=3Ti1Ff28&V z$Z-(|HWtqv87?bXw#W`J0EkgDJG&sV)zIc{*f{H=+hN`Vb=QAAmMA8DMX$way#@hg zJd<_l?#d0t%z%moTA#_)5iA?)kxrk`fejSeD`Y=a8divZEnY@1$YuTAb&;6cd6RHVzJPVCy!MlE~%k>*tCNy@Um?2ey}4i#q5 z0pbo>MvNaVW_EfEyQ&!ehv&adM{wE<>@$q26MPbbv*scyDcm8L*X1Q^l7k^RCsa!B z-Q=%$YM6z>-Q!_dU>ea&nqNY0R?MxHF2Ph|-svzCBSgJxCJrat1wh zL_YX_Z|MErE7eT>I<3{zzL847HEY%wS~0$;Ua)F#jvp)hSXiHl=?;{>FBxBZdiW58 z;6M3kK-{8YVwNCyn5++M zQ6L_(&<3^O0#hj^E(-u0DXh2Rg0ON#L_~$M&y&i0CjfhhYswItRZn?@S$CNcAC6aBF{Cuz7G$HvEBS5@hE@VQ_Y zeg*CZvVK~X=Xje4S~0Jbo3_(>m&Rla%(sznC|H`oPAD1CD80=nUSO@~Y1@4`7;Odk zW5;*IEB;e@l76egN(bBK=x-@uvNd@y2_YWU&FkgaM=>*dY5el(+#Yl8vMZ6yEm~wd zCAEBf@o`d?^K?@PpL{CNa(F@Y@X)1we#`5NaOZd-;hdx*fA(RcSl{R}7*(zasZ&c` zDDP)TqWo|Kwj%i~ALe9|-V`TK{$@xIp!Cr=dv+Z{hMvMB{9yBwqq$eHK9RX9 z4!ZsVO0e=5m2IvDtQ6S>xwUHrNt1vx?+k0imNYUtn+Lqh<;IJ;?cSo=77`Zr!cN}I z95Ir#!J$N4#URz^vx8r#*2k`x zS!mhKbi(+U+_od>)>A;~8)u#2K})F)`3>y0M8t*auqBztyg@F$w-}J(@nkiplG}rA z)jIFcccl_z*5P(!!9@VJ%rA*%o|aHX1||fnU6{rae2it?JoOmXJFLvD@aj<fJD#1Ku(c)D$uv!NTbjuM@6%%*JLfG2b75H)qKb)Dyzp z4O#XU1mD8Qm`>@Q?ry2F*3Qmd*xS)q%%y+V+)ExI&`uwjX;4~TgCAk_=FRtsTn05a z2^{|G0urMmZgoJbunPrbeh1`1){rH_#-?9xAZ5CK_kc*js&RuxC{O7a6X^h 
zbyVQ6_*Y?N#3KyXH*RMy(J6>2S>OJVRkm%PyIxP|0oL}NJH?DSkAqZ!a-mS`iH0Vj z>$}-0*79`r;+scl-C>P`{BV>o{bY2uJ2JWty5b;u3zayUhm$xj@brSkiVusFrIPzq z+T=9r$&t)!^_g}$fFY`LoNSn1y#(T(J<~hnKI$LI39aYOzT3o&39wUckVx(Iy3fQ2 zli!v16d|?Met(MuEck=QPY0a%52n8KzuG3he>wOeM)+5%tIhTQ>b9bc!qX5G&yNdo zx%(pI|2NUN9?$H*6xrSo_K!7LCB#1*Fqh&6F<=7O3|WqfZasa z!LY^P3=tbf@%^)@|fLg9&GmHj9ZdIB~VOB z9SuIo!lDcoaiL!tezypi)$1{I*B=PDK3t`j!r0aNvKKL z!!1WvE~JpF6{TnkFi}OLYmoyhE!lHy)Z&?Y+vubNh1eQ(;oNs?qKTvOzGQSKc)feP16d-~6C)_B6 z^f`}VP(5j?0rErFjp%o~$mkh3GR&c)C4z7Wm}{7`{I(n;pEGhXUf0wdl+(86N+4DY z{^bXBs0Vo{uxAnrG?8@0$HxS8}k15WGtUHg>~zmBHhSvyyAbhh_?|gLB=)IMr;_K-vdj7 zf~LD(7v#(^wT_5bT;mM={J9mh)K3?19zTEnyl$eV+-tLELNBej9zmQ%*aA1uZ~|SE z^uU=hh7K;(W#|NXQ&jhXDXL>%CqLK>-b?=cPgA0P-&rcupt`F!JRm*1kttrPhFwZ@ zplCFFT)1F{$h$o!amRvv!~7ImUX%MGE6WsA0}=WXV+lc4QO~NYjM5eLN9@iL@clo% zymy!Dfc{*~pheM$O1ueYSv*jrVE~EKte}RDMUwV)-0DvKO%I@~j0e!D?g|X33lTce zxNvXb?*KkmqVwJ@8$SV=78%r2on)v$LSTrMbsT{QH-SEP;{48^z#X>)2(HSxA9sF; zT4Rw0Q9j_Se5@2sl!5tCYk7_s77xVPM`hQ2Yk14RsDO_e0^sN3zsxEhocg|}yuCk-Ech2y+L~fQcDniW>3Nws1RAW;3*`uSQLlcU?)N(~-78fpKGQi-DC46&+ z1_mE)EY%DM*@V$6uJ~B^V@l?8jj)!Yah`Q)rz~m)a5jyC8Fm_$dDDegh#DW2rXlki zC2Jbc3bT4N^CYSU^n*$ESq#vEtA`k7wr>Y#vxDMTyIOfBLjdf_U9oV4%`>S|c?nVV z&DCxE463{PW<)INVN<~YeFY0h6PEo5JPB8%FZ%9!>8aQ2hI(SsR-8D=*FEt#CPg@C z$*uI%0H!vbg3u$rDoD4rq36u+Kpj~H)Wy33EyhZ0k=&PL2m=vXS3a8=sHWT#TRrw+ z-2s~NIFU~yvoZeZ)9W~sG;pO4f3^(Y9TI!|9dl_7UCEHEy^w2!l3o#oRA>7B6y}i4Wlwjz%9VNZ79!B|~4O(UX<&!*jK?bOCY`KO`U7 zP~uWJ!mTmHL@C6YV1(J)JNvd1FWg#4rVZ`I#JEbw;W@@jc(C`XO%G6H=z*%OGVxwv z;oj9T?=#?F3)G`3#SdQPw(hYA)jaHREiCoV&2#UxmNqLh4=P!#!-eE&07gG9w=&bi zX0NsTNjaqy*Cbo;)mRq%0DVYuC>DRrP{5<3xvr8cSMuV}u3cS?27MYE_G%_B$p!j<{F-s&nXgiU#o$N@DyZzto-8n+c_Z+y%GEHIATW*Zd@=38{YaObGr5p1~#q zgEoI`n>T09Ul`>BlROl{*vylzXL50gRhO)ReFe{LJ;jAZhe4STDfq3EDP$ZKs3fOz zrultz4B!(Kf~-JRIVF@sntd?sRLBhu{+MU25yKrAoYImDZJj3D5J8KwI4Z9yDpyS2SJBYibRKF=?S(k zgg5hP)1kxZ+5vD9l7l1UKn^n58|VyM1R#-Q9N%f6Qn!NL*_Q)wEIkDSGj`kW4io^1 zgY>1T-0=z;x$98BPM$t}`r;+zKVUwf^lfoIjYWXYNw|+8hu{-0FR$z2=S~rAE&JrE 
zbdT#sYJutD#^&Q|=FGs1iWZ2oFJVXr+N?Pkwzap^QNI_vV(0@Oi-m?rNY6lp#<4$STt5Hqm_ZNne1KzTOIRGPT>-p^kLkN;Cu&X~m? z4fqA9(Mf#f)+SCXab&q<53eQ?KKE{X8uI4TSm+vzFvZ;@j}{T- zK`Qa|xVhD>63F&_OK7}U3v4zOsu50X@C(Pzct-23o}k|1$XmLfttw=4w|(vHC9wgh zNWEGgtsux~x}=gSN>ZYLz`8~v`ZScw`LLoOR%I5SKceDe4<%NnS?MEJXWGhj2`R;> z8!3x7AA2ShKd>_0GfYJ_YEWS2%n4UV!36YZg_Nz%-&z1Xa!Q{`*UkI)lZLkH+fIZO zt&=bKI2ispnUL6thqiAznYa&)bMVie;62d^_8he8>s&KZP@E8L4N6Wj$p8n5_?GDo zYMAph`9is2CbO?5yPzIPmlD|7vH<-g?x)xIuW&&0Lb(S;kbs`{@_W(I101}%b zL+9tvkjB=n6vPNGi9%sP@Cc}!2ro|@aR{QliuKxEwlsrAT2Uh1fin}=js%`$!q8-m zb2I=DJXxcqkDot(-ud$K2p9h0&ieGk%Hu$>36Ry2-SO}Z`jWLMEXWKq%*eWgh%MYj z%Z*~H$3oZr7f5xbTtr3xB+c&yg5B3@yP4}Y#2Ebxb2F*Pr;MuWDSNr6rpHWtC_2D1}w2;J3BemxF~onh$3DV1Cm7Et6~sM) zQ5acSSrGvDafZCX{u;ofn7y4F$Or$NCVUHXF2Lo?2OR|zWviZ4u&dog%*iA?#6t` z=YL3`zeNW+@8NFAbS_jqnusi#hr|yMf|L(mLQB4IK~R~(2}ryu#6FP1k~rasUyctC zjZw(=mwGIk@^Or8fgc6Q#EVz0+BFiqsiv52jA|5X$_odvVe#qz0kgFXQe{vhlgS}~ zYeiykdhF|uAFihZL7G1FyWvpE3yTuhtKuZOV>n~r47cunJS#H+yM_d#V}$Gd%`USEPWd~TE@o6qh{83t z8u++ZP7?e#fvM?SwjC;5GCzj2fF!=*kL`z0;3SU#Q=)SDT~QI>`CkU$ZiWO3P}f&- zb90Y<&>P3FDl)7E#TODWcHo%D+@?ShTicS<%O#`rR z{KS}Bo&amev`ZAeU{NOnBK1k!M&1I_S(|Cxg|BkG+Huo5{`e#WTa@S)S4EQN=iKDN z3sPpy1-CWlYmS^1wcx*1&3s>GgL?sSG~4iUA9^yaD%n^5V@D!RSBMC3k>&@_Jv&MR zsk;^S7V;t-C}^(&@0xXM1R#|`~|!y_l=o`alHrC13a&{W#zq)y)mOOas$EtTqqZRV^25CiA|W9m z1|%fm@Ch!iHqnh|BFb%fjr1C>BqXGSUpUXIxg9y7ZPv}yzr_hQq9@$y<|3Zd9QhFE zAxZiGo6^KcgB_`fewHazOdC4G-K%h%JGF7s{)1a`0kgx~=<^3Bjx=dVx8cPmAakel zd#+A7NL{1H2#y8B@EGzDNnDRM=mhqV00#sapy&MH)@B7Y2tw9LmrIgX(J$?w+Uo@$ z;8>@J5o%%Z=g-BES-Jj5Ek22cM#Tk^7?Rb1^DQAR?z%}vd|HY(&sOZx81!TH&n<(> zH{%!NZ-yYM8t_s*okqX@#AAvsgAQy>7dYkXG*He>nJVjwtaVPYPsGR%zmr??h~!|w z%jLVvm3srJ$-NDkD0B$gQYLkun!yzz2C%^l{7XK(WJ z9ls^^t{MIQovNp&F8A{1BobQ>jfD2Y&3CcT{CvF+FS2g9tV6th-4ACZHGqrF)-}&NTY!9=@75)G%((8 zNGBBg&^00NjakhZ?$exSaCf6L;{#g0-gU~fAo>-NQQDhV8@`Z0{S|293g8$gXCw%m zI_Hf^*HCoO=2E5m4AtF z5*PlB`hJ=m!%V!Ji0}cQ@}98yiC2p&9`|nRq5b{$zr-|t;s2J_bVaII{Xzmqg^Mw) 
z7lF#7l8-d~53n4#YxRGQ$qSqgzwB4M!J=q@xj!?@qenzSP2mDdXZDR2XGI$>hI${% zEoC6n8K=um++M%1nBk@rqb>dD0FUmX!a)a>6;9=a>8HPhHM#c4d_1Z6Y-r^YqL_xU zbN}hnPbxP;nYbOF?*U!i2nCaJx3csqh&flUU3(phl4I%b5vKjNxLDJU7-mo6C8|`R z%w_a8gi=J^b<*X*(J0#1E1q?9T*k}G>xu}nxTq*^Uyh5wSxE1E#*sb2~g96fBwwYlAHz6Yh| zlHUhib3n*(UraQ)eg$!!{0miX6VltTtu#LTK#l?DEHm7f%+Dj68^h+7g5QHl_vZBz zKem*Si#Kq0Y4yPFWA$S~HWKli&^>i|E5#{5k95%ubOM}=0c`!b*BO3J!lx+N*53yG z$@ua4*15jM)zY9`k*OopkXI6q>BH|n)887>3X5W^2hY|@SbXo3XnUeoG$N6{u(^H! zssCnXaDrVf2NI=ZvBG0dtjfx9`LC0p_~oO555>7Lr^ zmL0$duO0|DVC}J`e@~*ue3#p?k9$C-c@hPR8AL=}1lBqeDtLGjeo33vufdb>82Xv> zz3FuUKs{vg%Qx z9~NVrPPbNF#A-eDl46M1^fqjQQCCR=Cy;@vSQUMC- zRa&fm8rL6@#t+s3i@VLau_TrQ40k6Etrj%nX=#eNn@JAgh**z6+2Wr&{uO(1z_gbgEr6 zJm#8&K8q^Fh8#c>LK0voUeNZm5U6suhz&s^$T$}~W09lxTo4yzW*G07T~M!S&02vo zm=pY*c0)`;uT>xNWf|pxQbSr)eWq$=(4Up^9wKHSfW>&$1DK(*7|kW#Rn@(e3>Nj! zsFsOZL9Lo{gMh{F)nO^m-!k@j0ZRu|L$v=wu5Y2`6LwJbz`fK~jqJ92tz%6F+!AHZ zEfv)b8SI-cJZsj|vZI)qNAjAiSb*jr!h3w|fu#I)md%Aog+e^xXG4C!!Q9W|HXh8b z*lLKqACEJ~$18Cvo>KIHqGoE31h_>kto#n~4W|Vu^{EzD43sd@^4YVSVe4)8!2CWh z2BD5O(OH$JY(ZtX6ofDn=`A*nF$T$pZh^%~HhjMGP6k?K1VtP;dQ|rQ^o1b4Q4#q2 zTAz!#<9iZ~-1qM8J)oW(fuNGx5#5xQ9VC}2ws@B-57Lr>h*v_+Vuh`xXj#6%EGLpG z1=T797oIy?lgevqf>q%ZYZ(IYFrD%x*X{5#_jqQ>Zjn;VZiF=cCh>AVb~!A;RWiFa z5D)Csq=B3Z53N+mkS`8W5BH<;_Ng%MJ&X$&MWQ1}7Nh7o?_USO?i!swrslEp+$EMS zsROGmUG$%%UB60|Qe8663aSsVi(exX1yUJClVV;VT+sjmZebpd(&@%KF!po-x>Om0 zNco^x{jR$diLKf_x$1gf1VEqaVhDWihVv(W6Hvk^Dunt_VYmMIs)!Qbi>U zb5SD{k~3J%&`Iv*%m6gdg~Y+fz-MoOVdaNZex+*a$cmlS_bcqUC~(?ciEUuML~nOU>6Pm0GqyaXdwN5~Lc%F; zWHOCsJ4{)=Lr6Y)@s0)ad|Daq*dfM(yN zqCgkaF4ZX;xtrqQQzR$f6RJASC-mICJ4CMg;XKsfm$omd95cdwU*Y_GbT@-i~$k`Pd%2+o(}Yi zsGv@BV5jN<>)H|2}N?(p-q*gn{?7uwwB z8-SB6EG(MVS%ZXoU0po~XXt6&oKFLWp^U`7u@wi@QcDkuM~?e2WAoa2UU=BNWj;>! 
zfG%=QH&wkL4Rz^kG)>-Ejgo8D@Zt18`)|AmdzJ_mi82g|Pap%D#}}+ZS%tCBXZjkI zNsc67ZKO%-`~8B)F%AK^A6|9TfYyv6Q4;BcV42{N50`IKq`TB9ZUQ>&cj;ne)-VdblUx93#adMhl&xBVmsMccQx zHs&qvO%NHv+&L8pVkVfD-o~XmUbm z!lJG3cRuArJO~d*a>)j54Fu`Dgtp`Qo@h}B1z;cetHBZrz#zwQ^(Y%;uRMhIeM7jK zk(Y`I_L_;daXwHV^28}m$vD)@LV?qli_!`CH-P{m5mQBtU5CXB{Y}=j`%s1YZGW_W zA(h1!y}@yOT8%{9{5y!tnM$M4L`6jxBmIU1j)E#wZ2yb^Fv=@vgSXpFVC51O9GRs8 zDbZ`NkJ*YQrFFq~CemfNkXLSY9o}`uWZ$pI{w1TIvAj_QTxxv-T!2S$ANSNrZl&co zVqFJg8X6iVQ_yQIzOf59yJYYlGC3gFB@G9Z)USzgV4WmU86e9BW&lyV D4UFOA> zsXd3;1IZy7Og}^J*tGX@)oKhGYIEh z=;+;u3M1~xlOobppKaSCs~z5(A_$HG?QaWy+V^_Mk~xa-#6)8h}pKno(9 z4vITsdO#53C8wZUE}W*d{^&69}Q6 zKSs5a$Ud>y78_cmqerNY38N?c=FLfXEM)Ug)`H|D$~+Ynqt=PA^{doLjR(t4Bb_Ee z!eDUI5_2JR23^K^PYT*oDhAP*@6TS5aZY)9pg=mWOtK|yy$`1tt?aqknu0)SO>1X)TojG{d4 z<4s-x^jsaJ1#u~1A~=O`EC*uZS)a?w0`5IIg!PZdLmuRYW_MwT%jRMP-N1nix)Ye7 zZIQaKK(JaE0aKjPpeYT@R$wz@dWbbrI|iOJ{C&h%S5l#4pq9YT()=@bdSzwn${);k=%*_=yLZRMghyLoQj6 zs>B!r=!loH(V=}NZV=+$5Ed3bZ9#(k2(o&~zGjhMLh>fcY0C)DO@#9RLJbYuX%Bv^T7stiu?QL76tnE&378->VjwJE0Wq-1Tt^?#V)& zT2BFcr_*xM(y1~qg#C)Lfr|ecWDrDkL#RD1kxi<6$<+Fy4Ux>QXYNWuVbnNU_mRB3 zh7GAbLAorAAkio)#t~-kQ3|CS>*y)gG=`dOL0UMnreFdh_9!5ys1$rTv0@ZB2qxpK z1J>##f7vc?d@AN0N>!n(>MO5q9zaXF6TLcIlNXPdhS;-2nWt%mBn{QsO0zd{L`&SG zAvOeLNl|}c8r4BiWuq}|e)n_FlSjA(PbAb^U#w5#8hed|-?QXq%V&nGGdlLxWpJ0C zV|~8U-B0g&K`Y+?hd}sr+i;%|h&AQ%7<-#1#Mg1G>gi8p=q0iO@p1|Z`fYRG!j$=P zVz88OFz;4h&VdB&(%rlBF+3?Dq`k9q8XlE#u50inTRjm7_s%#wA0}^`0IwTehgSsz z1|IBtC$?Zg0NFl6fUkvc^P==gnAjAljlE=QW=2MX*vC#I!|Sh?wsqDf-+Sk@4f6^} zWE##YY2%iuUvb0?UzU}KWP&~=7EZuWU%Z1+fxovrPclkDe`Y|Z928oHULVW#+e zzwMXMzyOWk*W1@_jyY8zJsu?`X|)O)b;tNGqVDzj^<4phW1#oA?_S(RH z;YiNm&`%@JVY39VjM&a&1_lO;F(s45`x)01(hsLvX!Tw>g)G|JWZosvzc9SIqn5Frk|7jVInY{b|_S~p@W$gNUg7xGpVVdONL(g$>z*XZ+o7e zK6j#c4@eqN^|%Tr>ptI~lzVgp*Dy&SfD@7rZ)m}$b2Bl)ZVVW9_V`K|`k>u&$CPL? 
zI`%@?+z2!-m&WUIKNDqM01jE}&AsKXUQwWdd;@7W;r4>lz($inF%Y1jekImWlvdmM z%gyU~v2}Orhe5M^uTOsh6&HXrKB;Dyws*%$^tt!S-7e2`_x{$JNZYe#1}?<4C(1;h zG-5xK8XSjq>(p+@7C`R8YciMUX|f$+J6$KgLTPvrI!BO6roaT&Dk>7+ETSqaj(GMy z-tSXSJ;KE4?lP1@GkO-&Mu*Cf*or`ykrIVWA^^5ROrwp8kZy(=jRO#W#H3ja>qMBb zHRITYvNRK>=K1p*A#c2nkU;#b$sd|LaUg;jdxNK2Nlw+gUJm6Ob}uB3>S#AX zPF==DE$ac;hYZ97!A(?&s3<2v9q|gIdlBlu2N^%n#z8Df;{N~xZs(riwg#z@%y$f1 znu_kJ7%GsE3scS zmuDJxA~QCf+bG(g6UpR@9_r;P){fi?%3yCuWp&j1-laDGx*~0R6a8Wa_BJqUM8}6} z=p#DYBH1=iCYV7IR-H@l&5}?}8b~$9A)EB_<$a8>y@Y{sYthNnKGqZnF4n*!v%a5+ zX6eyjN-1y+4>WH^tyW^n*t9x6q4PC$J;1-v?e}5NeE1F=jH8L{C1K4aB@I&Psss!e*+unOaJFGsl@2t5N zYiY9e^mLj=XppYEJ41wZUXgdyvO&uLaE;gG0SlclH_r#P_6E&db{dMpz_z;g zw}SH6=;fI~d8MdhtCog?`EBNMHVlo7oCuhn*^JF(h#U3}>Q5U8mibW4#lO(aF(4<1 zgw;X1r+aCptZjpc{Lm0XCvv>-nw|PsMv!_>(4AgQ4QA$tr8@J4PfMh-6C+u?sLhVU zLuwuq6H^9tJ3*j=Lqij_^ha;p1u$##kSNHRBjGug7q)lwr0qghQz}Z_W-(9kivPn10{b*qB!90O}$M)qr)- zvgCvWb26&=d1j_%QBhG6zUBvd+uC@Xx|T^NJ`?4ESnF(SwW6^Gsup{^{qU+epck!h z7zLEHom~%&b>EFdfTfr}L}H|nMq8G=5>W3cNTH;TD>`zwon*HjQ7Z_brJv;99SJ}3 zjiX?}-;6Gns>SmCO#AMi5~3Qkxh8|V;sp18in~d>$RpcvT-W3EhkuL}W_l{^Q|$oX0&>k-|ov`P;wv4jIA?{1bA;_%GKJWpN1tVOllk>i^TXFAL}zKc%cbP10MC z{Ofw`)(M%o-OmkSF|%VrQs$I%Z(}8Yx*GZWx6ge>njhMwxpNIWwfO$|p0r&RHCtr< z{t1GE6X#FNGJ+}fFPqvVq5G$y`<*s(=BeR9)OddA+m+5^(x{pYF2mV=f8ih0kMHMc z?j=$Ekfuarzd3K+U6+#0lKAtI6uoP`<^d$b0cZWrKrGrFTp^@)N=L@ucL4~wzH6CS z6@)$TUfkQbY2(BChNVfAv8pI&?cb~^9sO2TUi zUk?jT=uu2-2aNg#f7IUIPOe$FV6%sTYY-_It|>CGhUDbh-HQ7VO;fs^(I()VC_&{4 zU!~!pK{@U!!k^N@kx}%VCPT{`Ls@-? 
z5%g=x34R<_T$E<;;+zb?SVvbat^b99Sy%PKQi{z?%+F8&RC|6J3Z`Sp5SJ^VJ)oME z2VgBLvuxR40rE)(sp>WX+G!?-31_}kNplJXZ`$l;sI@w|-O4mmg!cbs)p*gs(>LG?+o(xjl zqe7hsoZAeUTp)oWpFS1Df`_uM<0Gb5Uqz#4ivEE3#UTRmL22#K1#K}2JhpEyn-2|0 zLSeIL!AQ#GuSYl+vX}!5wa76cGGGicPcOW- zK=~4U8AK#U@n&V-8St_j=PO)U?Wz9v#t569*n$3COE&a5jE-^c5`u!S4ka19_Gp5> z2S;*?Dwxx!aN99I#{&m6g%tW_)yXw9w9Kb4RK2kSk6HmhEqe2$bziQ|U1P9y>jk4c z9NoVzl+kh25htLcm}A1hU)~81mpB4tvxB4KF(_~km4>7CC4-&J&t~6WsOEWpuiWa@ zW>~}^6GG*SK7kuGNI9BY{VjP7=6qQhMBN8tH1&cB+LR3v+C#upp`jVEZzdPy_!x$7 z--#P5#N@?_iXiQ9)dwth;EXG7l^8&J4mKGgfDu5)XSzT-SU{a&JhAR>03_lLT>ycM zCjz&f;##lDYm0>heB)y5z7YK&R|c z2wS&0IVnN@BBN4-bztBBzO0NO6A#{@&Rjy+WrHI}NPNhn(&T6v4(eA>>lm%&5V{Yi z2O3ZO2?bNc=0QB*B>P+*0>D;~@Q9CS%}YEYB0sQ!3{LO!K&8Lm(9jRS|JUD0#U(%| zq4;305ik*RjKFDw20Y-`{N;;D#~_*%0Sw?qjrWaIL3>52=wesx#{HASKQP~jaDn>@ zLOu-P)~;o<;r`p*Hpqv%k8**O^elW_``H?0W0AJw{(eCz?63sx?B8dtW9-=v-=HBU zns74DAx_(X8!tfm=J4E?FOksg57QG;qX@f*&r83w7)^xDTKBfPpo{V`^0KN^rzRAi zv>p;Xl9!q)Pn?D$-y*P80>3eQh#@k10*{dN{?Nb z$-7!KZ;*#5v>RFIHFiboZB$X|K5$vJ?L|(`BNQ13%29{$0fOg(Xg@0n+ks8!U1dXC z;lVVrz&a#-)p^=9@%i(4blLWY)lBeaCOY*=AwGY83k+C|&Io#FYx4J_YPqX-htObe z#K%V_fu1;V0!I7&yDKoG6((;i%*uHEm2Z!&LtfO`F8*vGOUUV4y1QqGUhgu%pd^1t)3V7&`3<6HzXNBe^_lWrRdH1;R$biNj!9LwBvbuk>d|{EQx(M zIOq(!b7Z9Cx&Ed=%;xSZ;ZH~3L2_3xabgPbk9mw=!fJxs3f=q_6zofJgQ4^@&bfng zW#uybU$grVzbrK`05`H34#2faN>eE4T9zPj2LNI|;Hygjc_cwgZG9ijhfou81{^#% z7r}D`mU{5+^lA4}>M6EZ4hQIG{=0WGKnYFNb=bPAN}rY{R7KWTeF*)|*jT;JDem&$ z3rulKU&JpQ6P7rz1%bkOa2pZO3;8`-oE0O9#&U!wppH3w1i1(X>p)kNVlRCG0 zzvI1tTYV{rZpc}$`1wu8q=|}XNI=w=+#sqLL`(@f+q(8D^#|_vGyy8U1E2M%_ms#H zp{v{n&P}gLX9_PtR(3X-Uj$%BJoNPM00$!b1KXx?=FC$g4pF?}bcbQ85}gW}re_<5 z`8?Bzg&wl1{-q{8M-ZGyAVnaiWV$S|9O5(V94jm;w#3Ip%v}H=r(l&QVUWPyBhQXa z2bn80XO1^SW+W;QjxJ+Y2pm!5;pbd|767W`t^?;BgpD}n-Nkj>F>+EtN4^bPh@>6C zk7Awcy+=HfBY9%tL&gL?$7)Y=rrhs>GOBAgT%edE2!#&Rddne3xVPDt<~m;Sh~zLq zr}(0X6|$M1JWR|S0G3?0tnEgKPspztc$y?v>oVm`f72Em7aFm&Ch;n@+zHFqc-jLv zxd}D^pH=~MkVsnj)M)m1HR^J0{v@RhgKm)P-IMwC+sM3tvU?i@p!X-?B1Ccvca@T} 
z!S4kisQ%Dmc5QXV@nwJi|1_c3ThISFOG&97qsjZ9s{7-T%5DEH$)^%S55;f$B=m!_ zBw`4aHh89Qd5M@Oiqn;7Q*l7{v0y&hhBZODE@$A(svxfjPf8J$H~(wdY`^-wk-D3E zRc7ZW0@dyb!Uja4K?Y}$GSRAFm}=Wo43ssi3s=|I{(d5;mfeD#fj=ukp^)AGaI?!L z(2wRwJk!Y@Q39q+tfs`5iIa)ciNxqmAvLa2tqEQT$T#Cf_1$ucboIy!+m~!ZpSYm{ zZZt4hnC&JlN_DUa;0!4?P*|i_SjMost zJsvax*DMyj#JjTxTjCP1U?P}>4*n5V5SsmOAAOi|%}B6Cv3UUi0dfX@ZBHmPFiva% zL4Bb6CtCu=A1}on@{jX_->0K64>u3wl2(%lhH*gyJ|&pD^Q*IHkqxj3C|}7w1c3h3 zm0?Y}ke#g@dL@JD{{R(OcN4*%yRO_#!oEM zX^l>sd#u8;uv-xpLunH_xf zNE6)bV=efIpgu6#OeRJ!)T?B$WI_}lgPO;f9A~Hry~G6|fvDgo`HRREW!blyT3cU* zSJDDP(mSdiDp7i?p`1#EC40sA{)31@MbrBY7%MVYcqr&e&US(6Y-u`TXc!6F-U7Y{ zfJN%ub9{w$dWvb>&AlTcS9Ml#Lq2lqyAK~c=tCsSfZbPM+fXRPCWQc_gYB`s0>2y8 z-E9x&5k)q1(BSpA37_|)gL}1`e>B%YuB%@wNi~_ zJbps%9~^eXl1*6*iWvvS7>w;tVR1xEp(#dl^FQ&D#Zn^g6+KQ*aF8scZihu--2um5 zHSI5)_jAMZ(rWsFt&r%VwPyL=CjzehDi$}(Nb$;`Ln1=wWh+-EYM+Os+4#jde$4hD zHVj;4SZ7bxy$5G=UchMCiWLdQItZL-d|M+K)q)R>;N%!80&riBNX-v^9A~E7!E46B zGYN4VFL)_vj0pe;JQ+iXE+AEQ*Rq?jx_h0lg~Qzkr$d-~8TE!KIfB8PJHFGXV{UMA z*N%*2$Hv!Vci9@LvxaKq)YK#>ACO-|AwF+2{8m(kaD?9|^~zBDbuv|~NRPcjqWJ)- zS_{WSMZGF6=7lljGN2?PqZzS#2xScLv+Pj@8RdtltHw3Umq({|Z}V*;QC-K59W#aE zoxBaq8-H@MypC3wB;jPGvbd$AgBRx)@wP*&tKNE0%Nyk_aYvFEG=zW1!&>+f!Dx>< z>e4yd3#s}apv2h0;wvnU%_h)}b;SY~M$KV+4BcB5yk~7umSa8V_w&Cuvxg=Rp)Tnk zfj~$#$Hv5v^@zrqN7rFtDj-daws_}=tFeYDAaA0tW%LY>D*`P75!OQz(gKSS=6(3> z?dYMK*995D989@@^_J+4ZS#1bWSaJCG}Xic^b>XvK45c{leRj1lR$uXH`vhJ?d zfiTqpr70dvfuFlP9NvOPXMlp=`l2K3+_kmqIFEN*WY8Rn zW91^57_%XcV-|)2* z<<5^ZpfeAxSpXM}54gLBJ!cmVr=YI^;VlM`fqW_Ab$PZj)=B^8(f3*59lUV90wLL` zGU`HT;u>cy)phv)n0xbRs`oa0cxzOYN<}1TP^k=w24jYFqKrj~T~tJ(BJ)%zA$1yP zK$NMHlv(V|6`2w;WJ*ZpOxw2idwp=u^PJ~-e(zuJZ>@K&v(7pvV(;(%4EKHA*Kko9 z1zO0Xl`=QLh-N9`tewu#r7a;LJDhr z7g_7X?gmaUO*mUjLGuNkh}ZNBF(kniSl--U+%`YEaweS7?1)o=o=`TBMU>5l{R4Ons~ ziV-GN>csHceiak4HR9s0(sEZ%{rJR1wo$X$C@ADme8jwkky?zj5qe=%enPnDwY17O zZ^;XQab7?qFG!jNs);V4?_k)?n-npz_&KGGe6V$m^?yE)m}M5C$HR~npK+*fbkz7zvD;-`wc$_8H0qDH?)RVUwH zsGQFy0m_+ONW!^{jzSxv&u>_2aj?d9taIv@!g>>J+=XxRu$Yt3= 
z&3e0MFh!3L2MEP&f^J6+7YqPvpenpwun2?Hrluz0t#2dD3jQpL)@-Fb!|mWghQN|P!e4@Zm@HF60JOwpO)ir%%d{|Dk2J7;OwM=BzS>n z947lfm%LE&6In5Mqt8wqhW|E>(cs|V)o}a)YR4^bO^XSoE}1!DbcKO}CuRp1#;9=W z1;!4M%a16)GM;FRz?D$BOZ`uf@L8?GCo(*OpoCn&7$=cA>84G8=rR5$SZX`Yez%c# z3@K+M_^a9YLfWzQ$m^VoDI-c9-;X0Eb=0E_K>R9BPU(iS3KPG%fyY1572x{`_(lbF zJ)qAmJG*i6lT#lT>6}lEI$&Eq#vqVre~E1!CV98ifzjll&rU1F1H1}i`+oX=z^xNJ z`>|1fFomjZ9j}!!U|+3Bv(33zN%CAC+JJ{I80|)g zR)R2Y782z$-Y9d@%b4n-&^EJYX`~*9UoB{7zA~z?pUAV-1ASn(FDun{h zlMm>ps4>$zKUY+c%*eaCqhqA4A~ZWp5=m0jquqA%QG^|Scn(Dk4>lI2LoD93@tiSQ2wW8FIm9xM+cx4kPVf-8IRiWP);6)R9&7KxLl z7mo-4*rBbVID)ewVTj(n-5bW{K$?nn0|15NM=bfKSJ{)xZdmDKjCP*zfU z$D0dWknawTU{1gT(-yi~t)ssaVL;P@vP^f^zHJ~NfpYlx>C>klS{y>h==3$Pf&;|` zluSSix%gy;~4C;W?TM))o$6XnPfpb$tH zdPuQ!A(%*C22($mh;IAfl#8*z{_Y}pu!t?uV9r$hbV>+1WHM(x7$zSlO{*EMUdlodlqR{2 z{8mhh@NG|2S0%zFY#rP}50xl67=xsuxaO8(tez64=V<3^U zTVSPb?9#7#*XnNBKR$x{%fUje=NhI%TL#4O!^Ck41)`CpPS~=4$m~U402bAG2$gy! znSW(vi+p^196&2lZ3oR}uD4Ql14U=et#|eHEBz(FQ(}u5D}NLkdIO*$Aw$9x`bllz zX{$Dv*|B2^h#jW2i3U$vcnoQMQ2uGvGU)&NVv@ETr; zXM42x8qC66eIQ^U$!egbh!RD^0~(HldS}2_Np@-*GsDMVutMNikuGXOLQJgQp+n(V z5#*#sBsd#oiSN@7tq!XQiHm5cZ~)sNL5#4XiH0QAHmjdw(GK~^e*fmZKcWZgx3^hJCGG zmhh0fZQq3DM;_V~o$1!SdxQ~;mC}R~;~gqryX%@q#{%xU$Ke=RyKC19#ZQ?Brx{>P zy{GC3{|n7)u(Z{KnBnT>S zC$R;_>wVYIAn*-5&`oeZzkzCik=j2l}^f!ZX_T(Rvu?KP~9UqO5) zUQp@wm{ zP&4ED?FXSd_x*eJ__qVsYXetWwLQFlKN|_GIG>5%GKGSXKUT!jp}_PSc#i%DYv{nK z=p#p6Or%aT@on|5ITkM53#>lq67(xLeVW1etpB>nq?XE$C6k)^yH;KMFD^hT$;aS@ zEial&;#sj%MkW__2Da$uAuk|?Dm_h~2KNIlL3<6>ck4n!T)+@t+kD0hV28eg1suPQ zpFHVB7znO!H*sx&T#XNdxt(@DmyUeDO=x*vzup&JaAp<`)cX6IlhB{@nu?4VJp)Jm zkh;0c6za55PdA*8g`vKHS03psfg%y#bU+vu_amP^J&sEU z^m!HdmP1Z=9I!60mlpH?1l)>@5f^yitz;-*n;!OmKvs1s-(qKj=t6R8p-(5?-Ds#{ zAC^U@U`tIx3BW-T$>4`L0I}^MF zfd?xwwHjp%mOlJlK`bWGk2C>dq*yF`X9Sxh{RwM%V0omMA>9j!5gYiS}0sh zq1!NgGdaw|{Y??US2x*qs#V`hjv`DS!PP;seZ{UNy_c0f!I*eI!0 z;kJw_wQWm)z z0mr^}HBHeYuV>+KWuHHPhAAg$TVJ0l;@iqR9B0^!?NA7b#fj6WeF$$EIPUKJ_iAB` zQ`-pAPrPg~pdxM|M9%M87Ija)n;5x3%5?%H4-~WF;^Ix`X4Nev>6qBs7s4<$rBJXn 
zJL=OU3Aq`h54@Ni=1jAIW1?y>xdCb z?$HtFWPRuk@YUJvQ~=JxnxUKkoH4vT4%s(=HH|0v8`hz_BgQyjWAU2eAmhUY@c>0# z1GwmcsPxH#pb!b33orvpUI10onK1>&YX*8rwdFT@$zFN(>=}us!2$Bq<>LMd*>CW@ zV?c(rTOoY9ajJOTVY~LA64MvO>Q#hS9*4KgCxQ9pE$I3j`kL70EL?dFWjVQ*_i1RH z?RhobR5?XR0?sXci%y^!S|b7h5yNSK@Npx5aUo=7{@Ic#Q+68kE{^tEH1L-FY3u8$ zS0MX_(`&=x)8=#boea3eJ=rcymZl4mt}JIebcp>V=Y%`+h|THS!-_%fMdHV=E<47v zLhddlsdKy8!nX&*e#no=T$pu(V>!e0qx)eyyg`RmvVgL0wrkPWsx5lYJ7dL_yqi4| z8j~;l{h(rN=)?*!u{)3Cp4$nhag=R}kg6R$^6q^l`-_2jB7z%34R4mt^kbV@cB*`h z!6;qn^eOMwkloGwAu^u?4nNt3Wbbb)R=f@3-y}+%qbu@ITx3AAzw4%uc-QB-^6jNN z>^Dxmt($uz1{-e)6sVVPeKzxCoZ*x7XG>10j+}3&acTE8TD;%F483t^rpCODMSPO= zW}-fVSSCHnYx&2h7u(HQ5`*I+?uw3^c)e)t#@&u_&`LDyMf7u&_3 zrt-R?;H`{W5G490NwqQ4S>i>+jBjS8If<6~NwO#5LBHE^Nv?=UlVjg%|GE^OqnXjh zOe%tV3L;b;Y<|ad1lykK=?-vH3p6os+{M89OyQE(`okU`d7}I~BJkpQ2b$P?WL>V^ zkTjb?S*Nqje~(8+v*g_Ov5#a%6)8WBGYoHU4>R3&Uc%?zop;^O<5ycXJWwBzbyy_I zFa5YtQ{ja9AM@yf3NJqoOb?_M4g6YcUzS7ZcvC*dXoH=1WsTRkp@`(=;>2YqqoQuE zC^~Mw(OXZ%(^GFo?9!WjKQCP=xwLrk`$ya7snsQV>Vzw%c+C=x8?Q~ei6hKEWyiqb z_`QWk-c~c(p1_yav5B%2?$ANZiSy@w8Sn-N z#~i4Z-oErppkS-l8%Z~qxt#j?mEuuy{=B)Q({|AV!5)&v#xYK#i+ZT;pFVAPla$|W z*VX42y`g0Gtj9?R(+v`vOS#kbz3H*aWqc>aB6ROXtu@(Wi`S-(u`~`A?D3toi-X6) zp9X)jSA5%MwS!Tg)|`7n!e({4sL?Y%X9M2XPajHP61C;m3&6~Kf$~)Y9_A|^_g+Eqbov@4#Z>ieK zkTzR5V)Ws@q{U~su~x2nhjsyqVjI;xTy_V~lGvpo&@i-9o#y4UGT1DKAMN6$_X%P7 z6}CE$^f`l{B0q-3Rm2)N!zsY*W!cC{FXz$>okjUBdUQ@u0RQ>Sl1~R%^_l#UM~+DK z-o9P#Ht>D#=CEnVXJ641*4AN`eW|LiCmZKeojVP21-TrQ=aq8LU-R!s9ckNPt+eKL zcHXyDH}|c+_2KboBiCWd#!tn1GUi&&@7O5&n@o;svkn{ld|3INm2Mi09>OJmz1i;Z3hyYK%|NwaKj+u{A}&n0nZ3cI`0)MYHF zT&xj;YI(@X1|F?^bipnkizO&GS4d)!tNNWergpV~hxDI6{lg(tXY#(rzrKIn@KZlp zWd<*=#hB+Pt?`bI@}_s|HfC1!33guK;_7kj3YE)#_Dt)1`$Ca{!EBMuCu$=(f?js+ z4UTgjW{hV(-yE#tJMi|cS(J6Z3{Fp6=%;hdB9*I=RPS_^7Bfpf8J)J+*MVLap|K}DPOJ1>**h;qe`A zvN+ZDtl@U&smh?bc#8Th&o*gNG#DLyT;$x_+`~h?7D_uDmg(G_=B^OQQvN)N*WkkC z(RTNJW3k%#%&v1*Su-E*yA)bX3%CE}>JjHi`; zd3HzErLD4HzfnPz(p=R4sbueF9-ecx?^pti-QQxlS2d@Nvz3)7ynFTnDf{$|gg^KF8zZ&zW3 
zxfg_e_>h`;&L~&1T=`;CxE2I8OQLYn*4s{^G zxKGyhn*H9x^Yk`Z7=LLi>+%55+);Fq<>n9~zaUC4IhDU@deY6ew|n4bn1;rKwF{Nl z%Zt)@sAqc?O_xjo8r=hU!H%J2ZPPnlHe;QC`6{{T|68PFxaxeM>|ZyS68jmG(&9aS z(*X75&v9t_Vc8a7%mid8VP5ulPN|qQGg?(NNEz_Snp@zpXMAlbi;Bz(f)A711Rb&~ zq&g&jZE>hV;XV(JR88_b>{%N$_0^BKPcEqUgKAxS{Ls2OI66w5+7h6Xb@l38NSPnR zF@D%WGPf_WK1?NC3lhJ#zH67nN9Ls<4iAQ=1wHeZjkLtnBYFas!Tux$?s}mw~exj)TJBhrTwvF}}ac}40N_^L41LA^EnvyUvFzErF5#z;}0k+2gkASZbBoRDXys5&-^&2)^ zjNV|GE{p1a5kQrE7+4%XcWxo3CU?j3?@;vt1d=QZ=+*oz1j9$~PPxcqN(8^28E8Nb z`Sk*4AwwC^^UGnL@OG%`D8=K^qm>|_^%UQ%y#%s?SJkLp5?X%v1S*RL<<{2n5vGY! zcvHn(JO}VF1<FbjG~n1k95ORK(n&* ze9J6j=WZ$dmJl(n@?;l_{vr49w$f^*3Ftb^d^Phi_(z_AFoL{Y%(|pPBXg=*P(G-F zk!T7gUxev1&9LERd^9MpNOuGuwj5wvz=>25O&BD4dV+_KHA)YZhp3*OTTKJ9OS&7j znRC#su0WTt7htV8Sg0^~xqjVqa&zAFsc{h) z4iNu={yt7NE!Ahj8BiQHwU6YSmj?b^9m5 z7O4~3R&H6$MgzXuETz_Jcz%&FF>x^=UL|0BzH;bq+3Tkn!|Hg$$a|rn)Mbwj^M5Pb zL+U)H4$hEQP8b|!W2ZRxHF$Dx}N`Haxp%kb-pY2yb)mpRevelZLPsHrp}k;>cR@kP)!6Oec-js%5E z09BJ1W3^Ys2=7risOT#+U1`4|lMueOuLe6Dk!o9yy!HYlr@r-DI>tJ|KB^x#iHVs} zY5=7<4lXvuj(>r1O1&8dtI}AkJ%b=M-PaH{h5R+5!n@@t%}uTg!`6{q+qV7r3GAm{ zr^@!$)?5<%6w=Q9#wBscU80DVMei;R9&T&A$>?f5>W06oA$&UlP@||EA`U>=wF%3A zNlUVW(;&|>eIt5eeA(zJOUojXCD=0v`a&>RiX%tNC))i2C78_5yhWPTL(%70`~Udk z1iafI#~>}YG&n>MhhU%n!nEwik2A&n-c!57(8A0R?o+El#{)TSw*Z43f?#}_O%MbJ z2lGOJsMl(%v?JyFW*T_o_2ZbEKTc2Q12=iOfGNWyvc>_ z7C`WY1i2=>Mh>yJk97rzM?T0~@Stx&UPZ;3Zv{AE$8h`(^F+XPz|hT3Ec(w6CoKlM zGMLxP%Z8W9xP8&*rv(oYHU)i)LikX0_=Nf|y82|vuunObah>`%ZP#Low)+i0_WwoN z?m93w2yOSs6m7TH{GYVi^vZO|K1WCiLez(Fg6yyjAH{^p3-eHdrotamUxt1CQ=ERd zk-f=KGc90G@j&@r^D%^}Srt(=SWA@a1_*8BXy4>) z3)olyx09388nA6a3fZ`EBk=-1Q~}mE@f!t`xCN{pz;nSLJ6n_p`4q!Tk^u(X8|bt$ zXq3=asRV^vk0n8;;t8idjfc_!b_Va}+r1maVi4YPN4P9_wktGgq$`{aC$Uhy*`lqN5xPCdjzxk#VT1!-HiwJ zS$-?1E?)Hbs3;$}PP{=UAc5*N8Dii7-<@LxEZi@N=V;`fG_8zw^&Y0~kNgux9Dzp+ z4Us974SMuf&@t0&!@F@T6^s@J)-d@o(%o?;N;*9mzXy`id>0oN7#hOF%LjpJHz9=q zRrLs=L&!HY(59-32tO~00$AuFhUXO+Cm^8@)4gxEgMUnl?I{j>b)0Pi1~1?i?Sy>M zINoJnHSaUlwe#h}%99&ARQfc9q&>YI`|%Le5+Ci$4&RD47s$eKgIXb 
zj;OsKBs$Kwov0@D#&>=GHvF_uZR%X<++|ySdFt%|A<G3t|QpqXy=*{0@anYZ5lWDex}eU5yHkij3E|46NuqG5i2AmCDmU> zt{O2wKotmK&Up}l0*ZhB{7F8N_iYz&vNfT%iI}{o{xC?-eaAH^U<(+qLR*tNXzB z4}=+sUN^l`*raZ5Q?GkCv)q1s$w!D{r)lu}%q;8;NuL5$q$-j(;h$eijc9Fc)z;O$ zf)@gws|jj+zWb;&7QoGis1i`1dBKMF+Cr6$tn~EV#DE`!T0(b#Ve73scV|0Ugb-dl4FMQ^TZOF@E$rp@cj>BXn=~E=rZt%gF-=*o|Vr8bT)wAZhf5KjvjWE&dtfw&QXIvls8#hoNr;NY^eU}!{16w($YOgwqc z@}{dbcB^l6m*}3}B0_vM)*zOYVmwvb_!=T8;k)~fn3}G&JbU(}PV60|Xk6?B72zx@ zs`_!1n$j3=^|*rNBPk(qV!?t18rOmuGIg-!;+JgqrxI0*Bzalu78Zb z$N(Ys6o82qqv(W$TWrT6>eU{vUo^xm5*E)y_gS2q3;A3Pd5dBZk2dugLa%7c}PzWRGCTmBW?&g=D0thpos7vKakveDIp7uWrChju;H;nq z5+$hcgmjj~ux?1tZ!m0RYBq+s{;JQydov9BoHCKu;Y**ln2#z@*b6V`KB7~D#>Oy6 zYIt^;&AuLQOj_px6e>FF&z8Cigqh;9q3 zgxH}I>lq4xKfv|^la{Z`HY(1Q$kyVi3Q0_O2_w=yph{-WZf8tW`wN=yd&z8nyOl+1T#ZjgVnL7Qh|Pss)VvxVH%_F)aZ`L$2s>3 zCdbFJTvou4y8vJlhUjc*$8HdJG%$UU;)=2tD8>r}80m#GV!0=9*&|kb8QZQn?LyV_zIQImt&3hTY*-fIW7k~?aGw={a zjo)5_{w6d-;2WsdgczCSLgJqhD55Pd3&B7RllDR^HmQ4` z_i8Xmw+fXq@goNZyb0%|0|x$;_rR+51YrjZ0dFGuDd$wK)QoX){mDfk^*>5iWUt3x zydS!f1y(4|xKub=dld4JaP9K?`Yj@E5))7Sv{)S(uJhW&k|(BaTOk$Y@$bta@-ai% zZ3=sRQm~<{e1m(%<~Fsmvho!QeF)DKNFfLlg%=WwK@8k7I5J`e87@NTUZc$n_+$It zz*V0PJgB*)#RMe>Ya;;8J+ILUzJ{ZoG?KvrT;Q8r1KPt1N|LXHcNNcdY-BVoZ@{IN zfbjB1Q`0+;Cf@V$^QYO~-Jefl#b9rQ?YIJzE?$zK(J`zoau?zk$Kr~LKXK|U7ZCW= z(@2X?)&PYS^p5)l=?I`iNLS{-$IV0<4G8WPI3xis=nbdq$}Ky0)*S!WL04D^6YZJ@ zQkFG%%sA){gzUZxX|ZGhZ(ZUU?m+~P8;Y9BDKKGzn@=%kAOWm;g|2?>Re%~iKLuS= za1?mgPD|jG5^sqQkGc&)@kWgGMMX=@d9>I&=V9T2v_n8%@LQ5Zm0SOsz=g^R=lCre zu+4BCm;UB5cmP*HZcX@KWmV@4NmNyi>BJuxYbq<@FWU8@3f217zrk^oUrU^1r%&6- zVl4gNDBc??bT5{hKxPP~!JDQF1Q&!QaduH7Ud@b|Gcz)?YDS%}Pl~iWq0<5trM#sh zmML)ux;DUZiXdix`<7K*6#w$257<;-qB&*$3PptGWgI%g4q(tr1;Z5@^o%Ph-<+No z7C9gOeVApd53(=$Gu&%1`{b8dnMrB^)Pn^Jm7`1ghlcx$YTEeg28~%}Uv9fGQPgT~ z3*p&Hkkj_|>W2xuPZ`+GYqo4DHgDtCMg0H~Th(^3Istna-`K}ZW%cDZ9)&Xv9R6=$ zGaLgmrn)Ev98X*e<$!^V#g0-l%iR&k_*Ow3ZJ{v(1fh*$Vuhe)T9myWifg%ZaIep9h3(w8c-%UuXCY zLAmg<;O9U~P!^hlO#>pmSbdgjhS~rNG9o;_t6^^RdAK>fk;wui>nE69e#pui)q) 
zc*L^Jhk0dIDkVBql)9%EA><3ZEnZj*!+L|nv|_)}zep=x+jA!KGnVU2)TV^HK60eJ z?kau?NeAw6y?9X)Ta|t8+__Uy_EzM8S&V@iy;fR!IpUH$;^MZVAWOK|eo(t@rlOIN z02~}(yKw_Bb<*u>Jl>1$TR&eKHv*t^WdDAztt%|_2Gt)g4nqP0tD`E%YS(s_{f>%+ zd>6tNi2MMkdjKu(xA3QtpE(m$-D*jd82X&z%9-Ha23U{c&SJM}gc|TG&G!VfXwblj z#&7U}?21IaR2w@lzy&ZG#u^h8J+RR&d^q;`^IQZ2tPU!I|K9UUJk>yePr$1ed)y{0 zR&UZ9aY9%%5j~ z6c0Y>I}|A>eo6WVD&+EtiW!Oy3=S(6eI>QFb2n=-H&t`V8~1h@796tub}_qlXYsZ$ zEYU53IQI?C=YRd1V#=YDoQ0yslti0JPUU zQ|=e&ls7+|gp5h~#EI|%9_Orq6lAPG)Dr<`bVz7&U@eiFhznc107hI9k_GUnAh9+v zadF;g3QJ@*dHtk;YYC_IX6WwGk$pa5Z06Dr2Zgo3YKg1^v_memS=e6ZJJIM7WElpy z+L-+UqTDVe#YO=eEt@wN(Y|aHe+lt|L}T*~GUplXdSNUm8hqx-7z3{moh2zFf(oi>CgWmg;4sNkLZbkEXwh97 ztYDKkqy<=ZZa3=S(`5h9(!%00c@(xrqqX`P6P)Gp4FQ^4vY4}NM*_7b^71w$x?66j z1l<0k-Yq_)re6|VIik6OrXw4V-|k*N3{2KSYz$?aEkmiB8=<5R9`J#Zx)$6`$N`!v zh{G(jC@Xi`c#Qu7>lg_}XqEQ|wN`t_W2IRZ+B2N^K2`_S49+!jN@u$C&kHC8^Yv+T zbS}7!0DQ<}fZPiQC=HPgA>^>38=OB~B| zT86R$1^@EN)#PmlLhHX85qRYwQrz1a!+=$IUj><(`}V=t@I=TPUG8Pe@@3`Z9D~Xa zxoK%FdI+2E5qV@%-VY9bg?1pR4a|81sNnZz5o@UO2-W&=P5Cje00Ei9wqrON&%z;^ zI7StNY1{DDQFpC-)^M{QHBv_ckPs@u)ZgfWC45zKP$m=P2kLhOY3*P8?FQCaX`=0> zzdNkvPtk9Hs18RfOdu?|2Hf)kh%8WZsP5hS8hlJkBQv* zibCAS#>X33-?~R-k~Pus-rgU%1xTJc@kYZtYX0U9*N(H0A=i&%kG+9UtM19??5&V; z_WQ~)`7#*8Rxbvbx>S==L0g1_h|2=uX@lU;{-OYYOV1L4%7I-^9=tD_t|-vp0Mi z6-7{MoZ6ig`GNn{Sw6Ab75-uC*hfJq=|`OK;o$ft7R{=KJ5Rn})RTlc*Uzr53_T`f zE7w3M&B2X2>6>#-#;9NaUrp(+lm8)vWlg;9y{BE z9|3i?-9`(1wU_iq(txNxediN0e5hD1ubKpd!!H$uK7*Z7m z2AMtfmd4|>4b5i#uo>3E+k~nGy>vJdcw%B=6ycUKvVH>0-zx|P2EK`V1S4UdY;dFF4uT_Sk3j$8japAxlZA6=%wsx96w@ize#E&OaZ1iz;?utlqPs zMkuHHsUAHk)-tYjg<9kK@-7n`9X*1Isct4Ml6g!k41ozr znM`456W!MJX7n3^6o>wNo_;)?R%lBL!j-R9{zzPO9aAU!vsC<=dnhG6pgP8xPJx7I zE2-YW4v$o~TKjbKMB^@bMOJHkLJ`jzTbD((?(-)uVt{BXn}9d1m@4LQ3xk4|H#l!XOH^FuECQzLnrK z>i%et(Q3gq6lP>r1TcX}U`W?|@kjAu0Z!@|MN3B~CsP!0XgFdXg@mZK$6JrV015U3 z*#HUqu{m0SgA`(}LAhorXw*LfYUvaJ5iIANT=g2m>zlW3u@wZDa3>9$jU5uXTQ#;5 zy(%o#GhFQIp!CLYdbHz7pg1fYV9sQU=73D=P|gx2D8yDU<%4U0dF7!k;iKN{a_tQv z?e9DbhkC2y1)&w-g!MK>5wYoK&z?V`1t2{* 
zRM*RI!AQiggpB84xaNj*ZH@x5O8I}GI?(P@D25vh{cqeTfarwiSut?1aGCcd=p{2~ z-@1sF&m)r?&~{{W^}U3j@xA>=ehqIL?waA(WS?cfK?}NWe;JBZs)_7mO zHR@tC^ib)p#8fi%Qa9g?RdR%bajpUFCkOB;V!DCKf>>m~9W0Tt7?atxD;vhV@Emg( z?F%m`E+($6NRkI$qIKxdm&l`Y3;kDIL52z^XgD|#3dqGQ$7f<~&4q~>kzu}Tof%a_ z{nW9aQLr5&IFhD|(v2_%0E-0T3L7yTQ>qIRYC+OAJ4|N-q1Q1*w-St6gBa$HSlf@6 z!o&%acte)mP_)_BEn9F5zL0c<0|3c{FK9Ozrl$WiyEcsT?U4QirD0;TipIof3pau% zpQokW&&W(Qb@Aa-=tf6Ipd>u9dE|438y%?yp{U0^2}4x!gafuga7}t*aB8%A{bXu8 zej-`E)0xrzVF08ofI;Ws+Dr+g{YFH@Ko5u0!-QIDW9ysXZvA5BA7QN=jbnjABVzQ9 z*g+nNA@1Ewzg-G#GFkoI)ZY5ED+&nUCBh)QdE&;zB|l0*WYdE^KQ-Bf9u0rk3aVNj z@i9B-rcI@m=H%oAWAAXVvmXZ}clhCZGqjLrD`T{ab8;?2`1=Y6w}r-QoQnByvWSv$ z4%^W! zsYpEPG{UuBVc>p=#dJ+NHCm8jkS^S^^xsOf{{gna#;guQO5gkJ#fyk_CDhO_C0$9G z^z^uZ?^kz;s0=rHa-h&~yvq>xPbz*ZEYBuyW@w}yu3xv#)=^HroBMp|ZoB>6gYdZ` z)6Ifl8rozbu zoTA>-ez6Gg`0{zE+HCyO$B!le2SMC{uTU<*>3B`g{;Z6xe_Dr|%Y1xC-emcuh4QPj zmNpo|;*FT_zQIx|FVsaKAWCQ1H`vx-MXiBV?Q(Gr|`z_V*?ruXV5Fdl*;6nc<3KMnn^gu!rbH2*qvn? zVyj6eC|d6;(}-c*S!KRm7uMl4NR0CR${ek7uPC49D%|{LcKR2Ez0(RfQ%$N=7$pEnc_qF85T{!d0S)Zp19R<3u|bB1_|Q2;E&|zC4u(W$Oo}Qh2fIt&z04`Im&GcLcSRqKJ4%wP!%bt zE32xMkV*_&=VOLIbkQl^3=0dy1&pvW<+X2s>7IbDoG^)yJVE9a<(}+qR6PFF)Sbtm zy~K-BIe%UPSSVa=UcNl;-;5y#xNE$;rU2!ZCuIeXdRKv9wt*lhU`8PYiWiejb1SR1 z3WTBEg1JF_7JWV%3}Q|TUn-T9Qc}f$nWz3015aKCYLpyrjl2|+{V#my3>=XeOxDK zE5Hc!^YuOBUIbFteLay=*f1|JcKst#=Uzz2r${xl{}QGJHods|YB04*8^OS=I4{pj zYWsF^A(MgLv(vDEMOcV}d#GK^lK7G?dnsl~*M@cLz}Xg$D|)6ULWZ)yghsx?ULsg! z4H)`|B3hE*osHDwT_eP*w78&Puho6fy?=CeZW5uD^drNFU=`QxL+};2v!-28?Y=lU zQ_7;7ojN4}i)OGh;hbiIDb*=|iRDX{u8SZ3tTNOWbOuB@Rwrdm!-H;j=#StRBwr74$%W=C!DTl_4{S- zkmro^X)p8w=&qmKV_`OvdbFsH)j9s=Wk_8$@V2fC^yDyJ zIc)_YrPk-IqnQ|UWHITMTR`yYdR?%JB^PcwCM^6@VR>c3IS-o&|M1QI_G*Q){(rwt zMrGD)%v0|>8|ml}M^7R(#yf-H9;1}@ilhT72LJ5HzTAs__6)r4yR~LYV~}iv^Y7 zyBQt3_!k%8G~8g`VFr>$owFz->*Z+v3mO=+Bs~e`4!KvdSWM@hOU|g;2$cq>FR3V? 
zb09t#m@Jy-J&wZ?BUDRJb;tn-lmaxdRj|9MHaN&H(NBZ40%*iLpfYHgFB3&PYzVcF z7Ubp4LKjk#WXS`#GPQ1Sfwn^Uz)-9L1tr>F{aeW?rwa^3aKiwn<>!~@lvUIwSX-zN z%xu*8wQF9L=CzmInw+lV`q;nm@(&2%pCemvi~Kx_hd1^!?Eg&r z+YOKsdK41p5ZAV?YOs|BIv}S<0S%y0h{TF#uDs5&w@$%)ZR&vj^yz@@1j#`tXmnK4 zHmU>Nn_f_=Ry?{$#?6jkTBN?KkX+|4m(?f>84lP4=={4Ho(p^Z7QefHHbB3#^{I=& zDl!ZT??-t%`N?RmBJ4cJbU>i|#B32cVey>}!`l4a*kr_*3g)aPEkFq;zv_`1D z+V8!nplLjp>+|6J4Lv9(_R;mAIZbWZpaxTU4lJBNLaZhmW$e^Hlt`=|Z$ z!`b6`^GvUncL{I0%YK4W$Ui(jBO@WVoHwj=CcZOG{$_Bwt=!a#ljTVKcIH-Z`_;?u z6hk&|s`qH_8qaa>U7gBkKmLB{a`V{eqp!vc=FWQ8^?vcJ3HoLw-YdoB)!5LHJeq-ftpH0Ts z1!kQI%QDwZYC6#npE|_qiJ-HR_E=2xwi)ohGxV&w>cctGc?#nAac}P!1-toTR^QJ4 zvT@RAD1Q z-_^KY@UOG-IiGPr{9NF?IR|)xXuVtt?_9?(`KQ^iar5)_UAQD&Om*;752~$eZL`kG zetzIUpV&r^{IYO9$9=F_7%6pc`4T1U5Vn);>pM@A=hdxktby*7)MO{ApQVo$3+S31 zhFRsVi*w?orA?F$Ei&$b7XA6b0Pvb!I>k~7L z(_HzvG1crS)<_y%`ORS_b8Ia5sot87YN|xbo6_UYv?vsMYWf;4%qtbRDD_)M#mvlD z&J)+zdoEf9UJV}Zys{Y9JH87R1l#tmlFec)iExkNJyj&TXPGW^=>C!IiOdT}9#TB=!IfE8ntcLNJ4DoN*zD>n}!C8BDZsjiMN@@^}!u5Pt=`3XX~=6mdA!ZFs7H)X>ei%Y=1EG^>TQdeiA z(A4+maJY}o6#W$J>7t+z)AP-{Jb}r_L*2wlQ(=rGY@C)y% ziY=6Pmfmk_dj5wuPgzyHf8+Y~D_*#IeGKASae1@*7{`Ni-@J5>tt9K6euI~-%Y$-m zMOMF%7xY$%@meh36=Bo0*+AmrA%|-!Gxj}Y_`R0)Nlund+@2Edpv+;|GsB#1)we~0 z8lReT9kg_2P-1El=Vc+zdLW2Kq0A|M;GLtuVEd>Z^!TmqJh8XT^aASbt{bcJLuSsU zP)_w`afC^l9Y1Zg;7db>rs|l>N*=Mjp9AL=P2*eGof+d}8ds5~)H@xY%`cB7~pgWh+7^O2;QEmNs`t)5%!P?wf*mVbt$ zkTW)+ceXzB{BPy5mdfTZe`s3N}m~uwJrOzr#$Z19k4g z!r~HA`MJ-e>$lD_P_KUD| znvkptn`y6sY5wny!PnOU&N(!Nh0${!4O3|nnP#G?8` z>FkfCTslXOQWSMhMSeR!(<3%={nO!Li|)r;L@wVCSnxDC**rFWE=BgImT}j_xcKp<+>+o^{UsB)dG)KoVaknsI~RPkrmcwVPh0FbYR(h?bh4VCx57P zgs7o7WeNAZ-fig7&8@VXOYEPWGkQgNJS^#5Z~1{%bV}q4t;n)mXI~-J*qj>AK`S`^ zYhb}1yWY@}#Fc7lvh*>txHHCx&`5uxrY&o>@PVa8nfvfr&MxnH%15papRlsBIJ0CP zD_GKYtk%|lkGTsw@+@7edIu}iDJuS&(-c3)kJlc2R$9ZRQEcn7YrI<~EcQNB_zpHn z4bMyZQXw8@a3e$cZeHKv$b+p?U6Cbqfs+5t|TQ#Bt*2tZVD^C z(qdBMFW49<_M!WMh)CHpy`0lIv*J2`%$%dx^UHtW`^oLql@!WSfu9vG1SierOxzm>}X=WoO 
zAF4inX6%B)uWteR&z_fK3-+wsNl_OROd5I)l)t1XNLj*ZJ6{%S6)WQ`+#j><#*4We z3~BjBbNR(fY5L_S7SUg@AiD#?vTceVVeA~+b6 z4{{KzED|z>z=jNo@7*c=UG|O5O8eL;%wg-o>MW%$zkfTeZ;2Pw_%2MTocmmFr{E$F z=h?FvoAI`ic>twovNmqo;z|?Q0wBT__?S^I|Xhz-VB$4lTZ5lZD0K!$O-o2 zIj{Y@XbzsZIX=vw9A1 zNcrEXPTxPQ?b)C?-7{lhIDP#uGUIz|1<5n}2(1w!@RVvE0P)%zmqPZ9e>QI2DrLAb z^As*MlOx>XSkR;ef`-9I*faB6<`?60ZE0$wE?(V+{z65a@1*9fI*lZ_qW+e^f-)YnahaL$i-ozAC zZ=5~9@xM?_sg|PugejL$v<@Gh1Kvx+ii&Rwo12?=xAs26Pgn-Vv^T<}FlJc7&Hd#Z ztKF}0QUB~Yb6ODI<^v!GxF}%N8yNVCW&)MfjneSMwsE>v2)YwNi05sw zjJ6zK#yw4iFWrpC{x`~Z-?zm4zsP8hladbohh~J2`fKcCBVcqwSp=Yn0B?5nds~_- zFp_{^W&i%`CA?h7PzMkbk(|67M5KKO4_2i=(wcy+o*%R-g!F~!_U@uCe=#O#(PZ+8 zJc1mI5OE6z0Gboc#9zc55-`*{$aP7|O;nUHh2X!urbOmK@sOBP!29~m8<>u+gPA4f zt5EmmgJ$Okp=nIyV7_(CC_uWaSU==NAcM90U@$BfLF8Ty7kE(8`5-1n7~`FIv4mD? zQ0PkqstbTGG?KXhxNRV`fhF_WO`Ckx;{m?G><+@`oA8;0riLV3f>}nMs>HZh7U2x0 zOkDS!YD{moFYo#e2V64wqSKv{N}&=s@^7tefHPd5-(d!@+bnuoiZUSjMiNM9_9 zU_tl_Fk*lUe#qAdRln2`;&#t3bLmCLfUb^C4XAxzF~hhIE(%OCqV_Ufx^sDG`y)-} zdtYw*{@tb%yUi5(E1;kfpbR0Fj9<#)&fd2I1VKI1iI{NHwKLyAj+*l@FnI_JfUjVx zr+jX{$E#~r)1LQt zH)I4Y5R`v}ASyUU4)*mO_m@bv_#z?=8YzHPL>8Qd(|ANdbQ#kx9x)IS1+M;_c)Y9gC=f&nNNhG-aRHLU4$r@;%}W>>c=!Jl&j_?4KsvP=bI6$Z z_+Fhc7!It(NO2y<9LP4&vQ6u@&4+a?8wl0&xP@&34;maf;wj;S=m?4;@Oscps@t|L z2!{X)f;ITFHpzDxAc%_pG7>*WoG`Q^EEFtr!hgYpG-(tJJE^K_dj@V1A?vsOb3j#s zX`~M(v_@-AD+O4|9Om+Q<{5-6w z;i_q!w$r&L5j~I*mJ5EdOlCC;JC^6B&6Fk(3 z41TJb=I7z5>t>&|_V_)rlRz1dols2(0kyN1x)8K5se42@|4%HmTL+n0GZp{%W8O$Q zP+$%CLli_vQlx)iyE;z?Ys!{>lE{i8w=0Y=toY z#lns#DobI&(yIBIzkeP$eW_wiKni$hU${6j-<;4FtN*7QloQ54_(v+U}=*!Tp;~gAT+rX=$x<%Re_2*x@A`2y}z!6A>(i zz=1?jAjc-Z;ll{g^=PXg>l#MT&msu}O3-ao2on@?>tbLENcs9g74(7NweP{~0&I={ zhpjJxt8rc1U&atBq`{ENkkFuv(MltU(jb%;g-S?7q*~FaVHb)_jha3~H@Tn)eqlJCO#2n;X%z^!iHE9&^E*R5fqPF(hw|Ki_o z2!06bhQ=LSo_O^}#S;lvuDAD>kQn|fg4Al$!o zCj4L$KL(x)#TCjhum=3;+R0asJJqgPAX{zH>So`9p-D2Jfr$-PLO+;U%O6RHl9-u{y$0I}d!HNnD@mt!qie zL(Db+%SYmvc~8B?T%RdRAzKo(z9cn?7vwhBcg%z3`1i_q=Mjq!qd1id!9%wLi414a 
zOx)qt{Zg4{zP!yS0>p(-zqh&EwmZ66WI5ZVMrf$r5~feux`olw?9|fiB(6~c1YZP4K@JLXHS9(qp@et&CYSh2jvrQ zNHGgX2~qOEVn@CO*EUcNGIqHCMOE!`LLre7Ta@y75z?LPxaTaeodS{czkEhk=*pyVJN9^U zG=aZHM%tCP9AquIHaVM}0j2(!qVp(CbpTF=QaQ?Q>XyZ*UwWcwuEH zkS_+bobYKyuXfgRDLG%YddIPU4xTZ1S!E6d@MlQZ@!X`#JOYaYmz|{ih-Pk?7Qog2 z1@bCsd{hBumZ>+)^OGm10qRE2&P#;EgE9+Ya2k?**!@UyMr>?sr)>4>=U?;cVUYeB zASRSRl@Ip!C5APgp+Pf@ziq~m=~%G7np1W=HkDqV1UM8#PXrPt?Ha7AyQ9pR`;GIh z<{O&9N%NlAL}whkc6b71nzq%MxPSU>SVQC>vV&l`!IJNE!1>wB=CR@T< z;aX)n5Z(++(W6I?&NE%shmGPqn1P^^9U9(_;(><(1>Zx~dWfSy>HNc39tUwJXHn7f z`n<<$gSAokys54ZVy(MqS)KQ-?rR@ZK0YM(fGCII{qKzsn`S&XJ^CLo#4Wy4iQTY} zW9`sYxrzuJ{zTKAwk|x)U2}4BiIJ)?@Q+;Pd(M~&GpFLEjcJw+KLqxw(DI5;>+`2P;p&PV$sFHhol zy86O;(y`0NdC|68c_0LoW~2&ol6Z^(lx*&R{{D9N$%Xe)N^JZkfZl40v0{M@+% z>T}RuonS&w)->ic&(LgDr21OZ#eY9R7rB&xHTr}&lnQ!E#-4$Ck>~6%_kp@Q10pw+ zS+ZbsS8}P>ESHN_)f5gio!EGI&9xiV3vLA>?r3GIS7BcD0#@y987WnhC#S9ZqBRm# zt@oNp8?I6`)b14we&s!Vt@)Z=>YFzzl(D=V`X82jXt-v1(>?TOheBA{hpV3kA9ZZ| z+HkEud$U8-tbe!w=Dmq|L-32cLg0TChUzP|83~$)bKghWas(vl_gO*|!Z!xTh{$Y} zAt6B8VfC?{lVO#PsUs{)@;=qTu;jBXn@|{t#MmoPD61&{`L{h?{A!5=5Z? zBX0`gM|QZqS)Q}~qT_ojNZ2RSyF0g%uwEGHH4l(H05 z-bKDwHvh`!_U4$J+1YfNBWP@ogKvBYu6IHQA@Rf11(KHtI1*7@!Jq>dUV=)cNjk%? 
zWZgjvNz@21>M8=KLLQ8c1J-CI1mKyp;ZUiCwe{?H!_&XkeIeHc1k_y!%#O?YkmqS2 z*qc}gwj%BxSSmMskBGAn{3{x*X4HCegpY!4y6rZ<{$bFi3AA0Z1FHU%p%j6#wCSf&p(mt*>(cIwn>D0E4t7Q7X@8#Fh( zOSGcN%drlw7qo5yYmG!k!1)gjf?8lpKG_7J}ng>u5?$t47Yw_r&eKmmmPVUJ-pcP2|y zT^&=3p#37xQ@RXeoNs!gAKinaeI?4h!C$|^uTSHt#OKLqA%iIr>?pAM1U+I{J=m5m zo{&RFz)RxEm!UhaUAO>EP*on`ix@NhTwRcIE5NBQM$jJyt=8ay*>`+e<~EWz>jMa5 zU1e*8Y?=^?aVsoLtmMST1d~h|XU;^&6WqD3F^Td_*4P8_u;YKCx+Azxw*K?~e*dMPw)@fafB%fzfmIb(`PcmY8?Ei9Aai;(h$Phf z%G(=eh~TvjWE{6V@~>BQdlPs2S&`a*-v+;HxW_NgDir~uIAc6n-;k?`iGRa-ZFw_i zowL;TDh1)Wp4rD*-!txk6b@do;S15ZJLR}?(l=n0dc5+g?WSQ6Bj?@haiO|pVf}mM`6T+Df=$?!3xD<*EfBX7KZ7y%wO8ks!*xG zL18|mtR8T-al)qi9o1pQoTgL@p0*3wQfwC3chT37Z8BV19flJBW-?tPQa_#_6Clw0 z2wxu>7Sv0TRZTA)_`p2EID$yu8UYZZruiB^$zb&>DV}Q(Mo{0cv9Ti1cRw6tl&b5n zCNfcRxFPrwMnt0ITOwSAq(YV+?1^p#_mdcA;Or{(xG5s$Q79(Q*#^2V20A?Wu7-&R zi8yu&VxETl!`eqK+S{w+F(9g=TF>8+Q!`hpX$vC|K}fP;5+tcH8Y3ezaqR2;S4 zmQx=V-=h%0*0>SXj$Vq{PRN~fgV>Z-RzM)#gBh@Gl4?`CzB+{_ zhNb|*B~|Q}LyX5ZSWcMSS3hDtJ+eyeZD==oT z28D3CE7X9;&s5C3h>c{b3f4h1l{QGCN2eQKZ9|sP1J2}-u8gS;w>b(MM>^aml73;tp@)-8 zy^D_Fr1rjj(GYiaBYG-2CZ-bdQNG$T3S>A%m_So!Qf!g(xo~ekjKKj7J>j| z8L%^PhCtx77@4(pSE0#%bh;*?AgSc{3I+3?pTfik&z^)~$!-dwShZHy(UNR&GW^PsoLW{8n%ZMh$iz&n1fF5wL6?=>=Z;wXHL(G1V;Epud0#Qc&DUdR z`!2?mU>JF+`j{(}l$6}WzwAWOX!;JrkM71?rNbsB28gn>f~e>SeLHpXml|%DIFQXb ze0;GG%-RTs+`g@EEwq7HGkfH+`d=9*`hm9ACi0pyEe7Q+2jBziO7F07N28M>BFx0xh0A8#PyWz;Hx_ASx zJWMnC$DoB*gPLJcFdZ65qZG5pkcpnv7ED-gUe8D4u91LQUKD!iw9L#kAbW};o1D~- zcZU3Bg=EcElGceQF+Dv9!N%5TZUv3)ZsAkkBFkyVRejq>nAiFHc0%dD8ainItt1mW z!?r0DkZf^P)g9uvRB=I^MRjHdt=0)qS-yPv30qrTGUKL(uD_aa4!+!dy-Yq%q>VE=`br1vgN0!;y4YDXu5xx*2D_ z+y;E8D08GUF)Yy8gHtUU$&kT@zLz2e^9NoNOy4J1tc#3-Ph~htA~Eaj-mur0rBKK8 z+wH(EXk);c9}gxkHolnnR(Y2F!rZzKhM7jQBplggI9h!Nf6jeC&p*(x!3Z53oIyVx_ zKq$J1`%=A$Qo=0Ewu!08K=Yev8xG?%MsVzPxH6(e5eX)a)Ib4rjE15kCB6peF#%6V zpvJ_-)B=c#!j~AxQ&iv(iU%YrwcQ$pWSm*;|@eBe>k zjB}H4)S=^5fA7&NhMZOCu8HF$@DA_4{;K^Px^5+1fP@udxVLN9t{ZmqP$NvLsc${s zOhHwYN3^VcE<8{=!kQ)K<3~8DjNd 
zZY~ikJH+@dypeI!<0qFVjML6eNv&#uM{-;8sYg4m-i8C3ppcLOfLYYl+B!OIZAcfZ z$uUrdt6KjUPUT{#X)%CNOulUx{CF{Ws;4x+SO>J>7W5O?R~J$SM@M7O?k2r@*BN&I z{vKQzdrWzUjAMNCZ{Y;bRgGAYZMq*?i0^F>!>)w>?WHqW%%5vF;Y}WFJn18{LC-EO zB^?QcfriqnC@X`tQMtuu*WzXw1!9!QF7p$14k0*{~xM%r4$A|UhFiIvKz&iCZ!X%U-8UmIY$_oBT??+ zbd}-JLfkYAg@g{{{jr^)T<|BWqn&}_(o_{pCDql3QKQ|$XNxz~#?CHy@TZW69!sE# z`~Ep9ul~P$LI%1(;H$o3iiFP8;aU^h`x&7zL^~P?#YRn%$vw>MbRkv|y%6zuG3wB0 ze6;S5VY)Jf9I$O(Iheb*$=*PXwt$jzvEwhztdM~NR+Ha{0vzkqOidr{&qWt>H`ida ztz_W?h!t@mpd_sHw*2sz33vDfl+q6$+TK=M97I9k5Icgeg(b139$n z*8TGB0drFF1l3?<;qAn}#4@fbQ^cHZP&L$-UR+uV3#1jKa)J!Sx-Y%lm@3YCkW-}^ z=ZuLkxxr|y4gfC(&;KEIh1nGBH}AtICUB5VcSZS?aV9o}ec!^l(3qae=Hl4HWI}8H z{P`#ao(5+s>-1609gh4O5zAxxF2jFyuSabZ!W)$z;l$7+nq!JXvO%zCo-IG;i@%A)Se+>75ubN#e{kXs!5SBwQ&*~h zaz_E_Lv%(}iZa_NR{r$AsSC5Ga3oSb@kQ0B#Zmt)~;z zny`b|Jod6T7!c!?bcAZcUWXD&v);dZclp(Dk?@!FPBqw{!Jh&PsN)UsnE#E#1`%g*4GL|8)Kk7l~SF2bP44=>hm`Pq`k&YhaB&jn8!cEgiQKU zc{`|16)zoZi7yluYA2otcwhUWef8B23k^-S(|aI(L4FiIJMXZu!yL^W6A|J}B03Zm zIH?hyH%;nPLwQZgM4^}gds>LH3*7X~Q3|7fJ8x(A2EViE$C-`1n&Gl`4B1{(n;`pr~X&!xoZ{fii_3#&MW|V)SnF==Mh~7z`qK z;W|7jT5!N`W>gwI@;L{v$+q#;O*nLQS0`b^fe}an3RdwuCC6-S%Ulc{Xe6{0T?bG|8(g{Zg9V-?sWye?m6n@t#RE@diT5dDv{la{M7@LsQ zrL#I8y$7iv{EO7whqnmkp`pQArG!#%kU$qonV6Vf&q{T;iYT*(4ag~oOG0ry`TlMM zP@F?K_VLF!-P9%ktv5ir!@ww^H_K*@YM)mMm!SV(#Sppofutjw&V0)eMD1GVoxdfI zbkIn%C_vo+?#x_dB+a1-i~68k!z1H~`Dg@ef(Qmod|1%&C5`F{*uH3aPKVww*(}!w zFS`P@0fv;HVU!7p?OJV)gx=V56jzuE{3@*3q&&x;%A9Qd&$M~qu66hLHWJJ^IZ$*S z4LkB76glPhZ#epqhytOQJh`|R@ej5@PJ#_n6WzJcD1f3a=bC1;8d)w0WmMHs1tV(e z1!s;>PgBBcAbumCmNC26oS+@G*|f=N<0kWZE$Dp-@JSTp&}yU6fSnwnNMPfpcs~rQ zxg^yjQ|j5ZL`>&!*v(yVzqxp^>X4W3n`(|^TT@zq+pH1Uqu$HJFySJ=Ecg;+wz+!s zOqjCX-PQA)9>u+M{KI6S*JrL+&amuHy8q!G?Bxw((Ad3(e@w>d8dYpfSR}|zHrj8- zk;&ff|KP(19iWcD6h#b*=8yzjIAJ<+4KWx+wzrSVNpo!5BwHDc;Bgd>KMY5rBO>bq z^c>;sh>1MlO7)a;m@*(wI3Eb34{lrpw#QQ@>A6-IQ6LE*azIB}422pPhnj$%v0o9U z5%!!`>=}+_ru^MvC+%>}@%i8q8vwVBNGu?DX}e+=hc%;9%Sf<*iz;On#e;xjs49Y30=il1jUf8I5n#$B|ufl0M2j$pXY_#W 
z9JNq+%)TZ4GlM@*Y(q_oV>Qk=CG_s{KjmPE65W6mAkNto!xe6!Z^AvV@|tKmuk*O6 z)cgZCAT4N&QMjMvjS-hp0%l;wLy`_Q<-q>t>7mrA7}Y4CHu5B}sLg9sTSd5(~t&-SX7eEOt`^#uVmKMHAR_N|M5BGeXZ z5|(}zII2OdXzMTJ?V{q@1bd7Sq_$ogUGLhlEWKBfFJ-&n7nku$#+R z$ngeV03j2z0fm@y(Bfj$GHu13l3&-L_bbYjTH^laUdGXq;n#j5e{=Kr&nhguQ()S2dLI91k8iA2!;VB z-(W(Tq|aw|r{v|B+#+}aI0Y&g10o*fCTu#Ug7_41@D;4*k*VDbu7PO?*h&vJpY=h6 z(99>5RC@z>QDmb8oKR!M7#U!NX6|WMeSpL~>zI{8|@%ZE5uL-ZR-!0Ln4!RoK z=A5*ni(r~9Ok#+!<%O3Nq4kGc=P%ptopWV%j5AC;$(!M@EIJj}9H&iZRV*J4YdiD`0K0fV ze)--a$K|ysot(LYp=V@f_VV&s_~s6Axcu8y%-(am^JOp==)|lm7PHzw+`3#2v&SB0 z`2BKzSZM}08NreEKaz1;<7XZ#xptQixE}=0yoH~=)K?!&j5JGXc%BDwg_))6EcC<_ zr@Dgrv=-L$FgZ#RD8g_1wzjpYcY*~iG957#e&?p%bqA8 z-K4uV>EXdV9IH=Ye0#H7Ow|uO6bVzJ@I@qg1|a#r{id+H}lQ z$p9DAW<0m**xi)jik)-?omMsWhNNGhEqLJ}Q-l39nTZ0hs@=L$TZaS3BuVV_h+Hzv zJa?4LHDN|^pOGdjjQD~$?wZxBZv(|aN$!gf607fTJw#idmybuD!Zt43>uRh<>heuz zJ_iECBx64WSH#I@DmxB)W2jdQ3kPu2)WDK@m9irV12>W~mTp?i!Ax)3rnB`y_^o){ z3{0jpkmnHDE=%%wPM3#Y*9`gHQwd-uQ~~18T1CYmGMd6Wz(r!O`VSH6zN_t_A(Os7 zK2{A`c5R)VyRp%d4n=zX`jrxuTfKGn0m$_~!tELi5OpTSf;`jrK7D-Hz~BIn-Qa|- zGr#3k_xUaBO{mJO1p=zL3T*w)mt_(&n%Ci24nT4g3V>SSAZ2xr0v+MGOVX6eg;nDn!q@&SakF@({YP;4WZ=uaxm1dj~9*Yp)zta67QL)`j!!)oG5U(1!mHWcz%7 z@qfTQ^1%~U%GPN3{s%&XdzHY#g+o&nnGWJ7=s(0f*FGDySRUe>u==U9f-v(qJIeg{ zXp-q?=KU#h-1~l?9kN4F*`Sx*gU8`F91=0&Via^us|WP%HzgR1>7Z&jVPmrw`&Tpu zb$5V^Kc)&)fTdgl_g9E6M!NCt`z~C*97B9!&ddLlH}+E6x)pG^@G{hBL1!1QKcs?@ z?vd|6$78VP`^!b^@4>FIin6`QASnP1swmC`j4KERil6xlta9&8?$2HGXV3oL0&{As zS3KMPOwNb9f3~-4U|weW88v$exMA3$-+Rpd(zJ;Ct80xReG{I3jQ=%(fEyk;QtENf znJ?8;<~L{o7rw?cdy+5LjwDJ|Fet)t$8Hkq68AQz3Eu&HD+nIq@X5&c#TKOq^m~8I z&z&E1Lw9_y+2bGIcc(agG~W`Z32#Zw&ytxcVC@)U=XFF679Sq2lFo+nzAkF0Nc2>9 z@V;mys?b1X4Xa*A?R?JHG`>2<#fek1=|uvyd-H}E?qG&)#IQM1GJ#<{2@9FX|2&#> zE)0-J^IL)qF#`gf$LsHXW*VO)kTovi2@4CgOWoBGVEEoqjVSlA?48`J9(4P*>df`` z=f*@1rF4gEPq~ zOuaJ`PCeIBJBMJb5KxA^$a^82liD0r?*TsAM0=Gl+yrn`LuPRtjw+6=2=ST&w5cH$ z77mYk!I@MuG+add{ZNhyWBnFF&oHmF3DqWPr10+(w|qhsTakUy57bz5LynPXg2R|`uTx&G7~SvheLh1yM`B-OqI#q2Pk!jM>P>`y~Ui`4p16Sv7?gx 
zxXUViVA*Q!6cL^y5TUuJ68TQ8X!0O!u%*D?m&(&=RglbQ?zQazvz{43lrz;rT zXo{$gZYiLuddG8NN)2pn-#C-mg0A4mRZ?T1H`WAorkp{iLs3w*Vi_p}Fe)VMWH4IL ztZs~RG7u;YE91iYk50xcfNQ^lxJruNU%g)x{I{Y2t?mT#J|3Q53!@)ytB;NED>B5w zVZg|Qlu3juiSqRqxWlK5CwjM_dNR$vFar!u-m&@*7;e~Ed$gHYexZ4sMSeXL=<9K6u+`q<#jGkN%l(q_Zt;zjXNg8W`0XN|<;LSMs-BEEKN9{^< zaX`2-T11q%8>N>Uuy|Gwk*I=XZ_;Z2O@L5VmBZmg>a3Z^IEd z!ss#)^V!g}wC9>LXW?_s!XsflOBl%0vltTc4Wzr58pm!KPs?MTb4^}XfRSM2&`lqo zNKjcSCKA7(3C4-2XPbvTB>i~)%-lhg?xJ$XTn}CW0t#Y~9YajnX*Y#HW==JT`3lz7 zz;cMi-gAo=CVY}u3;r*6LSL{NG+rjE=(#!iPxYjVvGbr-%=ZAJF%-5mC=mQ!E~v*K z{sABc0g=nmA>LMi)G&V61PDEh@gw1RpwQXBE;JBEh5%JmM~sKS@2|jI)fxqsBK&b= zOzR$(0{ys$!NqX*;pOl(JOa+}7z@i-k}^EpW$|E zcPE(=18q}mnZz=v&U3z&mYOOKGbm=W8{gK$Xm;@fckpxBSy||aS{3^%AxfxvvgZyO z!2NB_%_`XCqA|wrsj&ofQVq6-znn!!6HGdJ8$AsE7cRJIxb=BJ$mGpX)+qotis zMQ(BtR^kkeP$M|vr<{x}&T#eeYs>1fUtP&o=7wyf sLIlI>es1C)7S}^Z@9@*K% zG}W0fujGw*PdEQzsmz+=PcvuUTRXukUPyInzo%^a9Z+OoA!lmBD0+#_} z{1w}_-QOFAR4q`+mk~->xPH7G3R5B(y^!6o;wWiAQ91ZKW>{5B#jJr;079G5EkKqr z0|(X2I*0!g^r-YHvZRY=r*Z^LxR|%PdwE4+ypGvJ^T+4lAz-@qs?qTm;d?^DxMIZ$ zl0~f_3UABe+D`v3spquToRZf15N7vGV=>h(59~+n54ST!s_4UCGB*}#MU1X;7iJX{7D_vKyqV2R-bn*WFwc!WZv*5qKV*L>cHmOTGpFp@k0nhY%Ae9-`v*bjw<)! 
z1!b0Z3GE{qdU#y=1MGYf9sQ=Mi5A3m@0E4eW1{3B_EUsU%^7dyBUEW3hFrsdofk2n z-F~UN84Tdp7_1Q2TOdaIQA;n51f;Rc$_Mhx2OcMdWgPG65XyAfTt`kti$05PW|gpg zQWLQLp1ro+%n#KQ**n^cWaZiKgC_k&xH=Quld~x(hj*b4y`&j;_<2m69}1_N__+BW zKh8s2u?7;1`Cy}w5bq;)vcSPmOt)h4zU9zyJe!oVHE#ypM)8cOsGBaeSPO?h^haRX zRbWbj&V6=0VIqZWv0W}ia@;T@wqqD49q|BzAp&t3M;2!A6QOCS?RBFlPUypj@14g+ zMl>*P!ns$Tc5<&pt`4d{lTCpD z01tGl3u}BF@xXh}J7ryHpFu$JR+`SOq8x&$L$3J%8kH`S*`>w_ig10cla zNrfd9e*`xHytg@c4K;v}@(>@{_cw@8rnj3HS*EfRC&yznY3z5yRAu){-V9xX_ggNx zEc{BZ?tCtLtbSa0!GcHy(_&?ea$3&PRJ~K1@N&&PJ-!&|PNN`wUa`d)_U3Wm+WkHw zNKeUHE+`l+KnKf=IEyO21sv*%s$|-gyx?>vE(?u@q$4j4NP-wzwbLNUO*aUgSgIq8=f@!=6n2J*< zu;_k+EjLTU26Tz?CwmqWR0S^p>~~&y+yP0nYid}PPsK{duni7?>oSFZdDF<7+Oh}F zMuvQkQXg)o`y}B+X<`68be0<#1dwSnOXuE)|wxB9|mX<~|+Tegpp^)2; z6f8ensjpJG_7Oj&C!uU2@oP(VbWT-4CxGh#Fryp?)nmKyUfGpfYN#jI(#X#D57Oic zjtJq);(V&PZ&v3nLHt5t?K}Yf1er@=uP%fiC8BRno}ciP*i1K=q@o-2w=}PDSs*G_ zEwrK3{0vL+1v}&`H>V9EP!%wyNR}vMOBPV)ie7LYofaApfPTLaa~Q-cx&xs*j%>#U zv31JXp9c=d(Yuy30Wwyv!3@J;O2IFQD)7k`x+$~n76c6pc!{~FU`9gO!EN1$6`PwA zlnGr;tC}AhpE4EKX!?v91bo1#CZKQ<2;3)jL`?w-bxfP)K6&z_8IvT4xH4>P-mPVQ z#R=&fdkTOS#J|4@5HM+OX8mLonb_tUi4WNxMrTFhSMb1g1zmM?oKvs09>e?X`5wJ% zPSC~2E2&opiSFF=>vUI41p)ipxzhOJy^CxUpH7z;lNpxG`!J`FPpy!Vh6Q!; zGjpQ-C^bg zjhLNc-xdcTiBBqwSXhePsNJPY8fc8+X=^wB>-hAUGb2$cP+W20Nc1p>#;`Q}zWO03 zku5+A7gq0eUZs81fzn_e{%RQ^P(r~fp`cJDwFK?NK>v>B=ZwIHLtO88It&{CDbB~- z5!ay^Q0^;}*M!+fnfc9#kU~J)7C_7~iop3oGNwqKXZcGv>Am}r@}7%g2DI_*JOOEV z(Fjm98e$0<-u)|M+t=9CzM=+B-KsI<{goBCdcjEffSklv5lche=)z%F952tj{3V8DGABcDy#PCikbHcL9h*e zw>e5OggwhZ_M_1hKa=D?zlLcq%-vT=kYPC3tfF@7*DIFIlV$w6jO&RrfW#r;alB|V z*Pw%z+VJY?oonf8>;pg3(levmN3Sd_u?m(C~&%t(0GGW3eN<1 z1$AKTB5JZ?ueNr1rY)^H(OB2QHzIN_tJ~qig<$dxl=|nJUr+IWxsXYGjKK{gd6s7dpr&C!n0o-! 
z9siCS#INIYwG><)&9dHVzW%$ON}vO|#PSjl5-Qp>{9iqTWhi_efFsCtXX{Zg-gigD z5 zChnG~c9N<-7PPdt&%p2<L1*t2Ap6XEN-hkPMwf%+%={C}V^cZV_FFz4|>Ayq&2crR_S z^GL^DIC4|a(mYafm=6XS<(Ct?196sP^B*by*(~(9U^G>G+jfqBsx2!!-eY-O+MIo+ zd(wJ0+dnlRzDL(W>LuAQWP%z^d!DBWZ=~MSY8sDDp^IowO!{Bl@h*rSFlzzvMFHa!PoNi8#RPB#Ly=7X(UXJS0)^>^Lz87$mxfAbI$l%sIEstSao>NTkop8l zK6|_yD(=5EB3|n<@pYbMWE8*}I_o;bKTA;YlPq4C^`mrMozqA;kC8jsvS2epx)E@X za1^E@lCyEJCPh;)jiG!M?r*vk9q36N$Uk(Z??62gli8>b~8B1gF z?2rMiIJ$`H1XHA?Y0z7QF?ltBqcL& z@{qiBl)3^UA|)k6Z1R6YGahh4K*?|(zSDlf@>9Vv);e?MX(zQGad;GXB@kK84zF+mE8I5De3MI85A8=W;(xmz z-caqai5RTu(qIe@qodkRdw&U7wzP~)45;e2KoTcZUC>50neibSJwf3tAZv|w+F~-d zKpiyFeLpZz1)XU5oS$DaQw}y_WYN|Z^J?&bzjkyaD*RiJ{RQ8>+wJer@@}@9*%$TU z$&9TiCMBVI#u5%k5ut|$TY$|CzK0ohYl}ng=L0>#`*M0sYIW6_H0x;1Voo7bYd1BI z8L&+0F3!)GVC<>Cg*6T9E>g(U0QsjCDBVW7<9M+V$nXJVKK=E@1}xg&W6<$7NhQ-j@Kxu)j;&tgEbZHte(?*$-*ElY4@7|ApJQ_ie#2?ld?}U+U{`162gYT`#Ji zj6Qp4o_UoCPVDhXlf}*{a~IwTO>v$REu@q;`^{LXLIg+h6h|(`nKySZX!mDG)$XOH z*iT|E7Ka{)%whq$M8OCMSG(5RU^Mvtrqk8o#2G3LT_yBXQP@xpz``Es@_H7|*uwXN zGu`3>qRM^b5qW!eLNBEV%3i|Z=c^HN3)V-56n=~WkYz~|Gw09;f@#XRoxY2WpkbP? 
zhspEqc*Ac0DE)Z!r>D|woAQL>zA&bVvlO^;I@dGAs*aDw=R6zzc&yWFB*4exOXdY^ z^D~FSx~xLa7!{C?6wprk{5EM{h zE7NBqUlZpI$wsYTAQ3rj>d(qk5aitpmN}y!LZA)uQv;RecNI3QUq-q4>tXi)fI{Td zqb^lQm`11wNu=*^G?YvAU(?KfG573?oY&MsZMnQ{@jLA#Zs>-#xnyp&pJg`ntG8M7 zAnjnc%do%=Kyzm{38$>sfPi9zH;LbHKPzGMAu4&^*X7i*s6l4_m zt~1FH((Vu4S^VvJY+B8~#9f7ZT{{&v^uBU+ypzPRxY+)fax|vb&E-JhD*z!?bTtaNd|LHQrV+jMXz=!>gu=$d-1Hfm;Ewo zqA|DARR6}oSl#>=W;>I1oapbL1%ubed$sxPn<-Cs?kqT%^7g&%?0E(Ln`{CdhNaHg zHh8~$xFtW=Shj+B^G&MJy7b-qH9K0&(_cC-8M(Ko9Po5?45=Ydq&m# zIC~+50%K6+Fc+UuhyL)^)}mPr8GF^Zr>D&fuZcJ4G}LpQ4?<)`W|OKkER%YjoGUzL zCF$!^T)##(?T{bfcJa(_dwXf;a*2%RYEiZf4SWsz(lV{qpA4_+#?U)F%eGP&Mv3Ju zWz>qbRIv>KOXZ6h|60FSCiopH-cGf8Q9o})v)Zk?-jnxk>C+X@Gu%GZF5YMZwRLo$ z_Nr%r+AD&m>hH~;-}zMDY3ru|;p->(_@sE}=+O)2?5{o6u3em9lplXEDKPhp$Z1Ut z>*y3L%jA?J9tIms-oC9Db+M`SmYtk&&$g@AuabvvZe;-9IzGPX>;axRu)?5iFlnK( znQz_}YOXUIDc&6ys$3vXqfOR474g}qFokl-^5YF0`dgK(`1jA?Q5zVSpXxGsYwwzi z4n-rFe$jUA+PZN0d{&|V_m}6zDdAGW6nexN@?;s379k+Zhy<(X^9R zSKDT7EDYeGRD1iWANkoRwe{NK8Bn!H+0v4}&Ck9z+T2*1L~(tJazSF7$ld+>^>(aj%u$NRc&#ya#j*XCRjooG0eE856?U$peq=5tGB z%0jLe6$MPAB!3Fy!+NyK{_s6!%%NB1xFd7ur=8uerQ0gcwEw+(kuM+BeVi%4;woJ{ zV19fGZ~P*SOrtB0X6nU%KP<=|0tI-yRB-o+ji0L%W^d5~0Eq4{scnaY%*>zJHV9wkvhvHcINasmrF{MG z2|c;6?a1D>Fiyp-^^!g*DKJS#4pcm43~X#V~g9cB5I zUqm(v2?Z%TPN!Ts60@WI&YEYFVaxx$&^N)xc-EA_L!ETV>6Ex2Z7FTRtg+a)alDHu zie?7I&ay+R=jcmuXlC($sd|^5SY0tcv28H=!nOx}-+Z;a=6~ zz(|UL#FX{xM<#boHqo`?qZh51%^Q1n=|5b6IkGnyyY-e|y|cQ(fu-rMGG~VNxkmL8 z|0Us-`kx)IUA%CV=Xm{=)5F}K9L|d91qENe7)3hqE?F|2GrGhl#kAszd%o$SnArSn z>vdWsd=F`-J&#x~<$CeulrdIa&0re9;~=935n& z1hpG>nz|Qp<;_F_%yRWCI`xVlDqb&(4QpwXWvCdL&NbRIyZ_wRnJLy)D^k_rKL_CjM?|S{OSEdjrj)I z*_St*dG)H9acW-gl1(?%^!U4Fk4j5#SQ}hy_G0clWKU}`T%L^#TnAIeN>qr4BK7g3 zMpu;9`M&DeuZMW&bZ7n*6y3(W6sUR~+pEfHIC<(!u6>%h z(p7u5evshLpiJdA$c zw&&p1bgPJ4oYBmtw>I^P#bAs%jLXU7ab1F?Xy(=>xsB;Y_Z3!!Hs*L>&51mTQV#vb z2v9isGLnN02D~S|w)XFzete9qqOp4wRHQw)-8F$%uY|8uS~BL)KmFHaYRHh9*V~^+ z2Y+cLpnb04^@uD(y>G0ss#xvTJ+Jpwu~BbIgRRmoty|vvwJ3MjN=1c*mt9;;8)UdI 
z$1gp(e0E3f)rH1CK6!ljwrm$`bjpI+{XgqH=s!Nl*R22dqP}^HUtpQk(C%GXs@H=c zBcd}E>8c_tGF&)Eqo*6LYF;iV?|D}%fZX9^`hds4{ScLt_sWjR=@k8`6*O+^6h7Wn zRp5H@?0;$o%^KC70e*d{^1;pn(^st;Sp7s_<-o7WC~404Rwg`EID`%3#?>TbV#LbC-- zD*mIW4Pe+@N4l6$yzk*s+sY@i(VIvoO_sKy&J66_7WM>62ym!m4@nN-)k!8hnlR|D zm%!yI{>-JhH_Et1k5}!z8xGSV637KIEGR0JU~Qu>r{&f4jhM23f3emYwVpm4n}*V< zjw7c5RQ=NEvA8gS)=eFa)w(37!uKB`za=6xgt{8OkmgFeKyV3HJTgPfvhP{|RmXG$ zrh@ktmuB?e^D@}`)8oakIdg?&8+4{w&w!8}8)zwL_zChmqS(V&Y;Xw3>!7p2WD*u@ zf`kt;xC}KX$*ad8JRfa2rWpYC1&x%6&`bhTe&}sGp$i6>lV1WxBmq*2HLEMdP#V;sX#lE#MG>O=5hmz~bKn?3lIw}7 zBXN_9N_K5NG>J*HN0AX#t+$-GZJa!cC;wHvd(IK|U zsgG97bs80FrOWsE@`-birWT2j0ST3duHsU^{&yQLMnOf8n^wFyYvQ+L%X~1T)m2q* zSg|Qpz3UWR5NGrq;xTv3YcUKZwiXalUbrC7rddt4+E8Q0b#=JkZD^1u{2Cfk!`dT& z5+<~*8Fgj^qk^PGTASyeeLnw?y;=I9>N{oT9w@e+Ca1^>Su&r?6X+6Ou9n-7_o z5C#i|Nk@1Q-Cpp!f!Bgm_DWs!ySSwGOy-&z>n1 zegW@Un1e=MS||ZJG%OI_fJP7qFa%O?@TT`lej>bbEW;I;#>00BlNN>D$pOId!@-Rw ze08C_K|#|!RIg(Q7h<-J0-rUCKm_JQu$9{2Jqv^W$f}`<*!9eMMj&^hSGJIgp~Cet zItKg&fSv4M(ndJb!2YVVVNv-04#>+GMz{@6^UF*rAK!$4Kw|p?=&=B(!ga8JoA2l< z%Rh8c?JHWuy$Pu`8};lGVqSwWxf`K0d(B@4Zp&|*UbnNa|LJ~&rw0=X@XCAdyMVli zLX`0x0)2`8G0bsG%E|&*%RplQeAWKE>KGt;BAi~LMVr8S4+oj~A_LB7s+c(4^O8iIK-`~S`5jqMxgLQOe)~~JiSlWs96%NNGQukKlW=KxfdYhcYFMO+408(wf12%J21>NXy3eCORk=T7US z;s&($_wUF0|1&7_4HfZCmI$dVTja7Jc1A<0Qd5H|v1 z#b+vd@h{nb&KpnBq{oZb9eny4x8XY+amot;oG$aUdD+NUe3UM7RVO@KrD zui<;cT9Js#Dz?`qAAQK`M}{OGbP+=i4B^Dc{zH<7iS^lqn$9XJ2Ckr7Sk)yj%gkuP z)~4=`ccG|&xqu<-JeexfM?0@A#w{klgsD9|0^zPcg%ZY1bNG4ufT=a-HDjLQw__It zZ%P$=tLv%E+hQyHh*BCJb;?a>vFWB$s^%)$yA5mKSxB-*CZ(&RCkTop~G+~!Hprov&bw2ruDqYc+RWg%Dr+MH{Q5)O8~wBO;NAl9vcud zA$w}FV3@Jlyi`c->$uUugON!|^ME{?=Dd9Q@`#H(3)%&Tffip`B3y%^h|n^V*v3)W zrhy);;l0vC~M)t-x6CR8(=~JE6wISGc0E|8O93_|143(g(jhk#wN=wr}y9AyKgbP5n8kqF? 
zfaJ7%M!qqXg6;Ku05a{4TUpK0(kfbB6S0>K>NE*c2Fjd@-5jJokG%`4sP!!l_V(^j zNV>S>)qaMqog_9k%Fc$~kYdO%y{vmrJ9;Ol#|rMF@B{*paTe@}GG5q&#@)6Y&MmKt zi)TTS8Cm7gf{ex2*j7mN89bljAGkG?(#(}}(JqfM}={t!kL!;(bOW7XPE{qJ7Tz7D*HCoCv~M%Hvl>j@5BW<`ghUc=3#D;0=khn5gI+xb0P3 z>O4Y^g@7F2?>L-V8idp%INyDn%FFq2Mici}Na2WX76%>_`4_IjW&aY4j8}kGf^jwp zj3aeO@Sv8z=nROWphFKw&gI#YbeM?gJW%}2a6%+s=iaYp|74O92={fX?3oAs%-aCn zz;BNFTOxvU`53m&?MJ2ZU?>=ltALMN0D`%CK_l&*U%+P7R@oQa41Y$*#467h14vXp z^y-Q{wssVb2M!+e#Mh2*@Aygw>g)}Ne^{-3he{9pn+qv@UB-j8Xjj@O)iuUT<-U>=A`+igEMXBK6lwDOlz$;u zjbmeDmH?4H)YaXCN_+95MekrBo^1~T=1i47+R2lvEOf3!?1*403AOav!yxaJrDYN9 zRn{la-CK;MgL`3ba|)|dXZ4*_z%Xejof6-jhmGkd@M?u@(@(&sKS6C(-J-K^-$|&$ z5cuXX0}O%?RE!8~7ZqAxvwF*xd*Ca|fkCwyHn=D=-a$fH5)G~HV`7&9AX)ytP9dns z1un3jtmrat#e;ARb9aGd4!R?k#rGGw7kSxoc>NiAT!%`s^o7JC1`^}rj|0Ct0}crJ zb|_S}&x-FqJH$vz+Si(w81L+4-$jdPHHP!3NuJX-EItbu=4+^{KZDoS`7Tu!L(j=? zpbX{&TkKlY&sFH`!}@m`0=v$ae4;|(ub{bt9el!1ezWDLrFWq@*57X%@OOfzl_YGF zfe!<}IR=IR*d9Fy8`izXX3T(BJ9G;gIyyQjl?8L)@=M$qAGcObM`^ zQUA4uIr7~)C2STiUYwM5K|za9+t0J#>b~QE75C5j(UqQUVPCdgHS`n;i1vL=@Tx2A z(_wu*7VuBDoF4u?IP@R_D$w@01%} z4S_W1Anb!p0R7=W%m@2ibI_lO6E{|Bh3!bsv_9J3ufx$yth*Jv|JpSr5N15UwsA=u zdS0fi22B|0m+;lZVLyl+5Z0hSGll>D0&RQpk6Ob<+l%OH!Pp^w%HXD)wz0{c*VEk% zci*Ymrt@JkNSInl&*1w>+<{RQg8-8Szk^N{_tG+Wu_CAg2s|A0QsF+NxOwxx0+Ss$ ze&SoV9x`-6U~;?d*|Qq>M9}(x3`IH&G(2`)@O~z?UPKyx{P=ZjF~f0mn8ZHAYXJ&< z+t9ESO-?>GB%>EML7Rtu5Va96)*qUvV?cZ(zXE=upvMgf2{{He(xJ?Ak2}Zh&xYx- zo%+f?eewjf2vXlvm;`{AVS%53Kt|QXAZ1wD+7@9=W!EtsIb1kDc>DO=0Dx-d zGI5d|cwh=T_K3OCCp`+a>ezeZoaEVmCxwcCi=W)l1`_7hE5BCYlHn`ygK`lK3Tb3a z%uJ%Y-M#yD=+6Jg-kV3&y!Y{=I~+x*geFNm%84c#2<=2NltPK7twOU5B}27CMU;?~ zjx?c(5~V@4qluzH8dQ{$CY9#huD$nteQ?fs&hI&E-TT+Q|J=2nwH`;BzQgI>+?-alIh?|Zn1z=+}~@=bAQUcSsgCvlF(3Z1;q0?w=aB8o zz~5d;rAD6Z#mTF;r0-+>NiHLUWwdwPs5 z`zyw2P~D)S?E}%ecJ!p|VUM&xx=V4K>_n*HckaJzoO(8p>zz-vi*cOr=@zN>LK&byG%pwQTyatOjT`52UPIZw z`_Wxa7#G~!BH}g-M@e8ev=g>=i%)u}pdA>fI>enTM|0x-@}A%7E+;F0bNl@Un_^MG z<7HiFculP=`d3!oPLXw&XjwtWss-kJTi|nrWE;RrL4A3PWvEDP9{<ub z`59&gIxsn+XkXXxTeBEhXtSb>c 
zQ04Z&_XJ!^LPi86WKHKDZZ*UiNB|*( zh1B_!W~8Q`gN{3oJ!(b?Vmvs>M^DYVnz=hNC93)Vu+F6 ztAYUwnkR_oW)k2KsYDR(75?Fpm+&O;+&La{3c{2uIx+F`guS0;gRBulp_kX2$crIM z1d3RbtxqfGqWqDBBs}QnhGUW z22)30e>VS|IfrqN4?(x)1PX@(zt9$4-POqbl?i^m593`@a`IlY%{EY!G{yK^%m|4D zryb;>FfdiH5|aa`&>AqF(F$2r)exLR$`m<%o0;t*m*K>R%Ju;i<1l~!8Bf#G55Y_D zVOG}d9sk~mUg?X(Q~wMNH=e3uoYbUkgae@A2buNj@1V8ZBA-&vL4&pMET+uc<_fkr zY;7@TIJ5)ERD!3c%=NANz*_u-%SNS5z_vRKNYBq4<5WNzD2U*CTfvA6Me>)U$XvL`16!A`{y z&a>{E_Es9~XGnPjhUy0~RbXS~yM8@1MGP+W6rN{BJFu`;`HBL+p7cG)Ss;)>ET{QZI=Bz4`bEx4a&0ekQ@3#c*1vt zAebPLb?0)R7W!%gg)Xsz5dmq82NcgBkz? z7r6WL=AOPC=$K%GY!+Du>DmQU=$G%`&jW;ZCbDkKtcuoFZ3fD@lB%jN+*&Yr@M$*(3+?22VKNkGTB!26JM zjyP`On+MweY$y9V5LFi_K$~PQgSZ9bIqK9)=rCBhUc$n{h<=|1T}Rhgwp)!>D@6OskDm@Mm7bILSzMpdG=N|8X{%|cd~*fVIm_cJ6jxv4X3`|wwIyC|mwz9S5Mc4s}yphj?sGB)4 zQGf)6rCS2>L7YPd@1a9jL^Op+N3aDKE_|rUxl6+5pg*m#FbUj^{8W@}i2Wsuk=T3GX;7Yb~nwgVE4^sY7PL zINOpxhI=hql7=J@Srsk{!S`Ybm;CB#VR+CY+GLBfvwUc|dY*L&di}HHNJA9W%jeuK zsI#WxA=^-1Qpd1vh;!t2wlPalWq3FPLJbVxR^byK!UdI;k1IImH9?amTM-c03xGWg zFt`&8^KipSZT22yBZ6@XL!DD+&nB4{!TLc}R8+JDAbFD`o&Y>T1$cP4o%fx!+bTq8 zbi9}~$kkL7L@>wU-XZXv|1sTteUeQeESKE^()lkrt4wcSZ z18*95z~BmNq4Fc(fWlWnk7mI!2jR9f}!l(gR#vOd6;WdJ^{ zF`1d7cnZW71^(Ijrz?(njKH?h3M}zw~V;#Q-Y|5MTLbe*rKk1c`k6`>tSK9oXgZT zJtY~U0J~I0ZHS#FsZC1709i}!qtg=u9KZw&d=wl~(jWlG-@QWj&17vMY_tQEA4#S8 zj4CP*%y`Qstruvl@sA$i5!QlUG_;sJVT!m#XrZ@Y>Hxf2QWFVtn&U4Y^f3AuZKxGr zKX#a!njXNVP_>ZA#<1DKPLel^mIH3Lf`G}AF{BHfX|3sFa-u(HK@OE~bMYS?SwStK4!O^%rNd|;N2 zNiwLd&d+6XytStgeQtfXi=ACQX3w+pA_Y?;rP8{3Mu+hBP%#ygv`h3`(&z?pIY_D* zG&nqX+LTM;KIJhelacho!n)AHh0VU2k=+ekYii`M4CihRb#?JkJp9@hiLHEx6Z+^^Q~ z14<(V)Vm!W9jgI1)>c+VJ+T1Go1c%*aE1Ef#ooE24USDiBrVJdJFJ)fgYOcW56P!GQ%k1Gtw!B~o#}nL&@l4r#BkagnIaH-rvY3CrjiUdc$pqqKVV zrc#939j4SndU?96O98S~b{S)mA3X|ypRoIJajT0(QSoe$T2>X-fm;vvzhmNM?^HCc zTl)pzEq0jX0X6hsgZvHqw$FE^)*SUFOlGI0hrRd#J>RwQLJ7MYEG$f$O^iINXeaj--bz2)FJY4*db6+2R zyEpL5$7=X&%#iEc6O-MqC%@@)JI{EuFICC&Xp7x8Habuw`;{qEi{`i<=U^EIw#+)$ z2r6#FEDD*2QTOl%6_tbB5gNCqhxvTe0Hbv9_WLUVBJL2tFN$C3>s|Wy7raiSzZ}BA 
z_SnSow9dg4zBUQE@_@f5S8~=!=&f6;aK_#W4V9HsQ!e!Ks(g^r*j&xbF0a6v8)2pB z?3`dQ1_u5iw01aUK0r~r`7J%VEjYPf(Ib*ReA=VRH*AboAAhXkT7Tk>f1)v4Mb3J_ z%vzoNq?CGyg7!N)CdHFVwa>xC6cYsjcX1Sat9l)T7rqU|`0vBRn=!+~oACazbFF8N%`KsF&Jlh-(zgJp#`N^wljorO z2mVAD^G#w-FeyZBK%^yQxq?D5(8V+6ahk;F09A#uh^w;%RWAOyV62$Fq)o()+K5H2F`Z8Fc?Z13OL-%z4g!gMt@LZ=bCk zOB9azCC8zvew4++;$)_~yL%wF@Y9QBu#N*&`8)<4%nsGDnUsMNMQ7koh{evQ=6R0} z@c1;Ah(w09{jm==3@@PuYU%8}RA+g1wzt<;0m@_veUi64k{X0^O|-TERfQ#~G_KL# zi!qCjBJa%q&js6_e){jVfQ4J9y3)D5K`fl7SCH}~1hJqugTd&mr+gmcRgRQm4!cc^ zw+c7V+mElMXvqX8tOxwDK$TmxFcQ7l=ZVSKl(ro-PcOkup`8BL9l@Uj&w5zkn-S+* z(l?U$+GftE`mWxkK7y%6J53?S5{4anR% zUm3d-jLtS?1!@*&OjJD5C_~GkjxoEKP7=9vLLy+;0bq^nA7LbB71TQAOfw_GwO7hh zk~aQ5P%+BWgYtRQ-`_y0B!h^bp(MyWJtV_n)iI8zbZO8j2MB`Om(@XRFIuq|mUA@a zE5ds7`6&1&#@jH={p;dSUHi-`&d0VNH?D^Tji1omx#uxPj5_dWMUU}Dx5Zt{JAzQI z+zpy;@)ukds3XBKHD zvPe;oVBa{;A}@L5IreaH@*^z>#Bs`yq3DGv@pxx)suqwWPK-_@1LolCxq)uD=h91; zP9;b4ymj<4TmseP60{n#kW1S3v=QSf@mAvniM5%_8B7kk znF9(WW5=X@+5`MTtO$1(FWx!pk8p7~!aaZwiZF`S&t2YG51ktdX}vVo<&=K{J69Gp zf4bU?g%-NGIORvOHed0!!R0aJq^pUs@oy~yEXv0jl8m%;11UBh{S~x{w7?X>)ZXd^ zkpO;L2J|1on~k*IawVRgZzFfYLJqht)cu80obS9Zn7*u%zn~*gO^-DcJ99f_^lx~O z(2J`JE?DqjbeWM!Losd1^-Grx>c*P$9y0FV{|yJ*#o*vQDgDfMjbps2(-XOFp7aIJ zxicmIeh;rCM@qS}Cu0H)Ckk`WInt{Z&|R*kfZGA2W$p)UZ9buk6Gp`NjF^OC$P`x} zMyUnl1c==bIji8LB;D)B*a&!M6Q(_4RzQqG+K=l>YvTq3TE4ny!)56EfKC&R6>dsQ zPh~o1CzeV?yo>Ul#SrBqr!^h|qW(fV1x_1@vjpL3F$}xPh{BxQEFj;cAwkU_z!;Hu zlZw6^1(rMmSe81hSS=wcdH~Xt`V1GT#Kgoiu3ari$rwO%!qVl-`Eaf>!Li7S^n=1R zA8w7!UK8@(B`#ss{oz%?Ox}0@iVa=%N}}0u-|2b)LLss{a8-z?Y^7-$pNx!5Oi4yY zo2`w_1(4zFC+n)sD#F9UNY1z887OM7Yr8+19o825Y_uzuKJ4aEQ>&>p!JH{nt(nI} z2UUo7sa-*_Pdqa3t3e!q3IbO_Vsn0gea{%wgeSB3nN3PHTjb++SfmN>eV8wZP%3$?Y6s z#$3r`O)h!hsol@TTRoV|7?7mFfx$Kh+FTNRh=%1cM0Po#>cCLpu4Mz_hVKBc`UTFU zD|qdq(JBxm@D?J{ddNBJ@gy2A82rGXQ~$uch_qWPG`uTUt=jE;{XQZBo*~&Lu${lA zzrVi%gV$}We1cyeT)BMtC1Vql6J2o6n&sWVDF1D??}8m)I8A!$H)`Jj z7~+IoI&!E3P=!E@d4V*}s3)_VCl~C%hqZyxduR{QAK7XE9!lpo}JW{(DGUQO}n6&9yO4+uF#8hTh? 
zb+KN#|A|jgNy*q6smOBQH4FfwA$2tH3BUIAl!*#orNz@7931vx@g@Os($L7rAsqU< z_uHlJfk}|W>cat+jU{2by14MIA}YwHm!dY9g|TUV0YEOZsDxErZAq1mC@m}djCKbZ ziCD!Ao8|t!(Ph+p5W)Exh}-zXp!o;ja%W>(hEkrYsw&C+aZJgrO)AEz?De)lf=)| zRVTcFv5d<~;a&zQ{uhAIVq{V8m>>JSc(L<`nawQMHD~H7E4=|LlVApz{YZ5l z{ecQjpTD$aK-(&Jzq1+b?kH=EqR0!igZA;5fS{FACJK>S7 z$1A3FvC0-b@c@jO2oMJ@$yCWXKOw~>Rsnr$H~$GaUVEX-7&AgQ^e5;ktYN?LuMcia zsFRh4J1@Zr`o_@=qivj%JMDhLWZr{*Cf)HLV57zzp1QgrdX#-5mw==B`T3TiNseAW zzwoQ-c6GP=MSw(Me==Pop!ptP85MPP0awv8!6-yhQjuOs{a}ZK5XB270wIFtATt}+jNmJ&KUe2px?cYOtLFdPRWth?HNVYF4$^LM#hmX` zP}+!jMhMfV!F`f>>5htD)u<4lAlt+(96AYM`co#wM9CQD^j;qIy zJy-Oo$=-Qd zpwjQU+0RYs8$T;fADaXy=LAnd@eE_R5WQ%_$B0+yOD9#=fAdQ;KbwmjZEOli$l_1H z1^a+fHIR7UU^Q%X`ePftBf`BNJD^iTc)?dwzJXvMm(mmKsXa234jkwT#wn01h zd@%qa=x9$O;3MQXoRr_9rX*qEdpeieL1Kl-c@MP6w6QTlfn6YKgI4PssNZdOx$TN{ zD{z+&1&laxcL1lsN5pLepK|TXQg%#g`+U7vKi(MJ#{ctzZKnQzv)0CZ`v1aOJIawu z@rJ4PSUoz39m0)>X>y#j{{B|L^9}81{oUGZxYiz)VKs|Z7iZRGTkcd0*`Wc^RichZ-YNWJ^Plj;85t#^XR2PyO|cm^<$aXnj&4|aYn{P z{F>Ng;egL_)#n!i7wx}Tp1L8n{jqeYJ8;)auj|as73_g zbi6`y^7dIL_Zh^IcWO*xLSs%PA~*A};TnRBEqqj5Aj_CS{;kK6NM~ zgJ2QlCJo;$-F5{u>4lmeqVNMC%p>?jP6G;t9RNw|Mr^Oc6f~FZ=W#f#apS9oD)-F*{m_tx7 zaQId4GT%K=oX~Ze@Ud&;7FN1>xJjSnbidR5^@JaVS>FJZ1tVzZzL zm@BdN_&w9LU?RUMFTcuTxL`6rgxH?@QAJ6I?{KDg59_@VvmTD51 z&3A0wYPcfq?}_6{R7`R&yed=J80*MLfX*5;s(qlbp`@-o@%1sNQ9wz5F;K;O4n1_~ zpqrUVOpO{fsGe-EZsb^f8_>Q5qo1ZBcoZmsTT|70;@GjOdcV@r?IEJ+y>#tyw?MeT zkK;N!IQ}A_+;iC~de$R13sf{b3J@k$Gi>$wy8f#TC$PaWiE!?(rg9Zil+Q)`Rd6p5 zl2h#=CXaD;o}2Tte-qT+Dj+(8eDv*$7tzgqwSV@Wo9UU67yoQZrF~|}Y@vKl-@K~w z7(OGC@7~appI>}^>hO~XzSqSTU+<3n&(G%oZ_>4r_d89eEf3*4IO|h0T`_F`5#9hV z{mZ}WK5WUZ$~2hrzF1mrrlxJh@rvBR!IsREy?gd>gS*{(>I#N>Qg}52yv8Pqw@h2* zr&X*}SmPE|u(}UH!Yc_c1fS{IM3k zfbop$*M8Tl+-neeU}zjdtgzktE{TMPE9K@ajBP%{Kleg&?OU@qxw*e77P%m(Op)gG z!9ZyCqGq{y%;Lh$ajOc4`>Vh8ueNP%I#Ds(RrJ?Y+|WG`xl}jj;3eBwT`dQVX_BRC zDm5RAdMo;(Sx@I|mFHwh_E7e%tNe7Y;%KCK!Pkls1IV@U-Y7|~TcGg##^TMxg;>(Q1ECcp~tKQWPHkpNM$d{+&YcxnpS_f2Jd8E`Hwd}#7 
zy$8Cb*FNY~f0@Rf9(FRHvUI_v70nY9{hzbXs>{%FF5R9z`;>yZNO6JRv98?`66?o` zL{3HD|1A$eBZaTGJ(`seINQUE$)lK-mXC`p4eQ;;9L*H&WSV`v-i}QLW6iM+&6-A1 zYbbwYozN+f^~*|8%-I?7lK<`7uwBN!w#{_fvXAVRjhWvXd~Myo<-cK_d6VNVu$1OG zpR!n4r7-M~*}!DPy0Fvzi)0QTPAqhpc(>!b{PO&M35n12F@<9Kr1Gr!Gb!E)PghS7 zyU;&AM9H^^;hG=mqYhp4JD%dmI(;cR;dk=S4y&oOjE(gdke&PIYtrV~9d)#V_5y*= zBX8B2Z1%+W@{0Z{=1ibUY_$^|)(u>?lt=m9qTwg20w${?m0}ZT-j&^{wZt`LXN z(91fmHOs_qvhdG?m`}{9Fd|KfBGW5AGmcMIwDe0;ncr@kt@?V>s>V63F(R?wG^RI{c4nTGRGU>n%jwo>;{5!_${QuP2#=nA zJgebk#Y)S7tGYo)8$O72$mnbDvHzXY;_mM+I$A<)Eq&>`{QJPU!A~19r8ElLgB$8w z_BzOwpFY!Stwr(6%zCl*_ZgH*qrHA9%jV8wGAXA#)Wh*d^OQYeT`64|e)ie|?*ns<{9ByH2}1;%c)bA3b7PSjT)~>Bf6yU6`88 z=`yg(a^sH$M`P^vYUP8Ir}pXHe(7AGPWSmf!U*Hc7pE%!*(GRCm)J!WxW1L*f0Mqa z_C;k?)wyT2bLP1?Z))5?an`EVA9(Q|Kg`8t%P8KLdmBGTLKd+6X7mv`n?i8&?MMhLs&ceByHcj@uRX^4xK-Vfr z;eUK(Fz{4fgC8b8)g}(LyKvX8OjD@Ay^V zE(FF0FPPzV6v|4UIRM;}VBbcJ8AXX6Z|h?HjB(b9?uA1*<{D=-o*9%!u(b zM>Bl$lA8^q^Y1CMDsA>OaW!Y%%c0X8P4jX$EMgA_7pHBDq+iXxvv8iG#pT*? z@%2v+&M5AyuQIVZzAuzxBPl6oKX(TA&U<%uxNG1>I}YFH9=gTgADR;@DMEigyJEW> zO}GBX``qz3bd) z@s@q%VbRJTPce_al9b$a_ofeiW{P*r4zqP`Y0st@vIiBmOkCi#n`jpa;h1DT-}Ir9 za%j`0pwf4KDfD4u#vrvYhEwAB_#ngsHY7V)_qb)vM?d}gh+AozWaJarHOLWnK>An|vi=94Rjtr9A zq|G*TPfx-F-E+;ZR2N!8R za%`No&-9buM+sK8IRKM0nT6Cj6g!{`W}X`!(z$2B3axbiu0TVytHfNwXA6Fv8AMP6@7Uv3iLXj^+mX9+?Q`8NVB_6jjoY;b-beHaDg>1D=AKoS(_Dm2?fXa#U&qu`^M;^f$Bs5Kq$;p&8fUppdVJw~ zlapgKXyQ4kjEfi*wnk`nv*$%FbVP>l6_r=D9&zbzJf!chOJ)0boSj%y6tC~7KU-n? 
zZ=XLPFm$}1e(^~(kNcd3f)R|#+%d|gO__ZSzjZyfvP+29dQ0c3dzVmGET74Lp^R55 zf9+Z^`H+yzkr%v2Vf-1g>gMd(%wxT#0Uxu37wKMY2~}i#={u_YC~Y1}X4A8CjJuPi z?nm=u8#9izMYu0cPd(QWN}DowhQliCVtO;&dKX+Zzlqw$nyN*G;lbZ6}e3ev`QS0Q*-!ti-r_i_jeqj08$;?>EPCK5l zJ=Z^d=Wx!F!f%$4kgK?vcd7;T^pmPdv4wAB^k0725qfmDTzl7G;7Yu#b_9@`uY(q6oPF>Zkh@JZcwJ#;Fhw&2n#`EbJV|F~2ziMjB{n4Ro zE?)-EUAiEs?RxweoX+Q*``Nn2*g6n8=e6d!tUpPf1`0E5hW+pU)!JSN$~3$Ct32b~ z8Gb7J^0efhbdAOm`FOekx^`t?d@N5AnT(HC0)W}7SD6O3Ub z2AE9b(%nTO&DDMX3c`wZKK@lp4{9ZR+%S#uUx2IodW||N1XlQ|ZS%Wj@2P7}d|Qps z2y5)i&|HgSDODidV=ZFTC>8r>g*kkEi@LeY!P%$EG3d=^>U3 zIs2)h;W8M!FiO{sHY6^qP$%yGT^-v=;JHN7PU3j6^o4EkoL%?R1>j9IRnKK;E)i5No3CKGBYc?UsqEj4FN3VN{|L31i;T!%!5s> z9Qk*ZPfVNeyWsJZZL|#9@;tbvf+s?*2v*-<`D})Og0dKLYwWqgRvnsoCnAE-tPt_N z7<);`Hz+<3yR?NGf;Z7~&zU=4_}3U`up&@a{$yX1Klq%YV)i+Ln__V#Td~w+LdU+# z+&q9-KxbqmIlDv53JZ{%At9f!QFWkw8}BywAK8$(>7Srwt#})j9moJx!1(?WEV7=E zdD^`M6C~&>hHL!frjDK;2Z|zLZh-xe2dgWNWM;w{65QDG`*oQ5@v|f5g^>S<2g$p4 zQz%ehOt+Z@;Fg!7RGcrm32;+qy$_KZfjJ91#Ul9Q5fC}VHIF$mzZqm8Vs(ih0oQ7= zg2L4uR{{eG^_}>iUb_~xKq|kPd*KlW8zQ)Lrs)pM5LyQ+0wJq`RemunOw>H*ln_X6 zn~xn+hWa&Y7&9>uX#qdks;{pD{SssrMp0>a2a=p{*x zQy1_F+qwQe5P|@CJ^Krvec#&K4IYGE!L{SzfBYdvISIl0r{gN^tJTyFBf4+dSObII z$RHT<>f*3?W>H~@?Jvx^X$U|0ssy7_d>&Cs{TIEft<^?va#HMn7+mFaXI3_QLty_e z&>5sH?5)B8r2(ygC>62CxA3z;5P1-cq`9h8->Wo_UXjZ|${e=IH_oOX)#p|9*52(` zAeJN^ry${O&=@C55VFpJNDw}&B?)`tRF24|WiF$!k-yimC^EF{d(B++Mova5)jO-M)x%mnuMR{#%}XCx2BQM(y1jkfd{ z&_PBO4~N5vv6<=}YI$Y`AWeXAFL4uU-W9!}Yl5Ad$0UKEK=cdnf`hLClLR>`1C`~c z8vtyu_rITC8>A|l$9i6pFIp`sA~kht%?Xu=Lzf;w1oF8l>jc~h_DqOf zHUtCshnUGgR)gzip>D0z^~qhUxde=unnmt`u{{;1!9p_ivgk0N)iu}GUn2P9owdRw zfwVJI!}A}krbo9FRJguBNL-+p{KI8nWp|&PMOn)qk=EqC_d9!7Y}hvO>kwn>w40HY z3c`Mp21SZ?f;cZnUyJ(xHvz2wCC5(_F|>cWTQNA#ou6S9Sd?8#Hmk%I(p5THCl2ZF*!MRcGRT_-ovd| z>~XQTe}NlJ*3rm;tWtJU{mT(xK6Kf+au+JJM(F?a7I@-wDI!uE)Vp30VX0=$3bkJt z=iMz$+pheVGAJ`TE>4zYfV_Y2169YjW(IAUCmfK7Q4bJu;^*T5QLbF*6@DR{k$B;V zGXaEK(hz_l#aIcjuEoXV5%-y3vm*=uBS^U?k4U#v9uvB|>({P%XJ)R!nyVGbqpx1S 
z-iR6>ItJ5RT^*g@z%eKLEs&B)_`A{X^#21{mUUgFq?C`8g~u)cc8rbvIj6n#WHy~T zwFc#s#HRr2K6vQRu`cUY5#h*d|IdN%!5$4d>j~$71W`?~ieK4QQLN_T;^|N)FcFK# zLxClQc)!NIz_9V(zcTMXVq5RefnaPN=saAKEq0ksMzI8h}76t6nRho_93z}vUagsKSU=L6t_9ks`z5DEnn(g5x3 zYqna>ka`Czo{npIIHr@xl3vms%P)H?$lpLytH*Ucn(5 z6(7%MX=w@Z$W_yER61fkj*K2<iOy#=Wx3f(Lz&4t+9?R#?2-_$FQBlU^} zl`08106`5*$n|4QCkQ7H3c81jit6e>^a>0Jz;?)ryg*Pgof@ezN$2?Nkcj^4`huNV z^zEi2S#$r2t9Oad^Q^-J@%r)YD;dg9k{zBl1nAo?41a_fPYD>yb@d<*HA0t z*#_YAveqHT;6)}8H+SMF5y&%`0f6ydeQz$5{2#F^xs6sSYhp61@E}x*O0Y!Lq#rmlrMk2JNZv?ax zwC&I^oJb)wYx1Pv&QcbyStB~ve{(O*#>R%0aeb&jfO-H_VWPXjIsvcc9179liUi%h zEe5^pIlL_)Ibei0sex#Ohhz?s2JITroXu`b_X4|^IX?aw;(T5$c9Ou<4-AT-nb{Q_ zuvXaX;hir4t?GL-m(BgKZipF{JQhTk3HEH>=6h+{MiuB|QvIf<*3O-%-EOKQ)Uf3J z-t>?K7Ctd{yFSO=VEOH~Hj(>XI(1%xeCn1}`?nsx+blI+&OW?q_1LHVOQJj2XWBz< z58Y{BTXDL4RmZ2ZeMcx-SoWwkY*}(W3L!e?iyFQu%6Pgp(k_KJX?h=)Fj>E~uA;&q z))S)0hkF^_kLhl#NIjVgusOzy75Wuo16(9%yM^DUBq|p(m)&E8vFx9w+(677F>@oj zoyf?@(#E*NMh2y~XDS6ezKx&@ER>WankV;9r+MKyNAc;9QXUEpV>wnEJmd~pZRMgq zw&fvM7Rd;NWxoY$#!wj6EIrBjIwVE>#BwsWz7c~)$}Z3Y_E=Z~@j?)S>KbuKOChI-)eZW(;3gscf5S#lz_sh0?*s=``RHl8r;PgH8fUt+gTn-c`}7#L zL$=M3BAgh6SzdhoNSPvo#4TX9%3M=p&kb*(v%1GtDY;J0eTVo(W^o;4af`qiC6Y%_ z;C%^80;z8c9?^`KrfFi5nm*e1k;F8U69o&(Fip?w?TfUBg09_3;dro&)47-5a!Ei7 z7LM2hU%+>50V(P0LG3m~C>AmWh@+pSi{1(^1PvBK;`JT6WBUMTg9D%+54(L^8dw9| z?phC`8WM(PGo3qo$$29=C>lezpTW)w)?qO-6Q$o^e@zr&Pu9_nZJcwnjtp4%JZ(4v z5Ca0jH@uUB#9O$?O{I|ZW#k^bz~Ob^$PtC@7wbOcQNUz>^}!aLK#Us2*sv0l3GWYe zhKXf)cz}qP0x?5C7!HL3YSr2-xD$?j01XSBhIsG2w;bzglMJ?Rd_wQk$%W>hDEQ44L7m)+Q5#j^wg2+!_6R?kbG>YJ%q5w zn@+L&@MIYd+09*Jt6I#YFxZm(YC^?C<*adOenzLA`4n=tF80|V`t9g-q?c>FDNN1zd)}Ucq20U zy(@S1Gxms1^IsPP`%92|x;;9Y%Czk~kM`8cBTOe@@|p`w7ZG|>y40Nf?&dW1tfG1j zx*`kcH}YS;)Q)p)__n8-eUVseD)NTt^F!D?ZQ8W>ro)L}z>my4Ga?6v!;2ur9<;Sh z+Hhh6(uzLmRO9cA@7fQKjHn)WQ-NXJMaU2F#_PI(E`l^iTQ$nzY3oFB=|eVO~i&So!H3H&MEDkdQ<8iv+sF7>9+Q)ooBGkt$`uK zq#1YH&YC60K>p&5E2$}ir4xWmj(&M1gjYiF`f|2|`23ONOV99&Pa5~pC?ph%xXVF< 
zMMTG>V~QAQc^Y($OYBtJMvBvp=Qp}A0zpHNMnu{&A)3&M!?K)*0!z`OR`Vm*3P4SY)0wbgj?ZKWro?)N%v!ic5Tmgl zgz^Xlx3Z352WG#?%ii;G)Xmkk5X^sN&`E}S8%0q03oy2Bmb@if%keNm;W!o2$V)+i z@`Yg82CjzU?KRHa?m;Rj0=rgi>UH^f2|9v@J|H1Hn^Gzzfl`K^^p7o)j@_($n2u7u zBTiG{8-#I`(q`5kA?KkW(X3~zo~K?_yqnl{&_ct#su1z4E&csLaPaDzC75dWb4oaQf0wZKsL0uTQ~eBvV9f_jw3~%^^sd3RGt2xcYA1%#qj4+-moot|#09`a zmjf-#%`;a-;GFLsW#BZoLRNZ{@h3w3hwQZJql#bW*pn7Vsx*823n0w}X(@xw5*W5& z&^2}Eoi#jKBgzTk*_t5J68$`}=EaPF>+uE+3|NovPKoSnAJ=_|li#{l(eHp0S8~-s zKBxwWi>YPqnHg89^Ant;$OFwOKy41FTeu_!v_fG@Hk^ij@2XE-$`gg*M zR0VF@T2HtgEMK`Y01ZmgjauZqUui?UDyAJ*a1D30b0oe=h>2|@!jnU9^8EHhj4oRe zj($ef{vA-shp*ng4MZ5B7A~vN6?Lb2rS4`1V|&;&w(mP(V+PLou3iW&rBoY9SnhE+ z8&>7@=M2p;VRWa_-6m-0DrQS++yop;!nmgkEYbr6SGS*$m@i7oy2tU*-LoLBOv zBkK>N>0N=3_4&!ie*72tKh?Q`yvJ7k`aDZ#bS1?rXmHTe)Bmvh+K+!Aaq_`$|NR*O z;xqm`Jsn?ZOW@{y{VT|5T>PK^_)L8I=|bJKe7mwGJ+YB4M;x_9x7Rbj_b7h~vO0<76-8kll^%gIv%CL7Mk%j>BVyZjv#>4askUeQN$zg=< zrRyda7r64_{?)`=tfE2y5_?rscA~H{XUL@k69jqZ9O{y1<8UQDLFfW&9@VZWLG?|w zDwvMMS6_bN)tWN{sn!i0fbog>DvZxQ_w`-HOfXfjgX_^|dx#A(n-Cl*(7QS6o1ONU zU&;Wr5)D2yaz3C|euCW;z|N@@ph_t3m2d`8@Pg6`mkYnrp`#OgA1aVk+lgIV)q_aU zi6JYy3D5ENP+WN?3p_(y2x z#LVdwCMH!fw!l#1C2WMyt?s!kRrDqj$IeUiYXd0WcJjKxT(N9T;Y5ZZhxEWWKL;kq zPbBAHY(#TCooI=cPw=qDq@qXXcTZvsRcW?YH=!bVb~Vwqd3Q6`nijX*o8T(F5^@LnZbFY zP+Y3B9|}Uu{B3UnipQfc~At)0$?xCYUmzFN+6O6*Ej?c0p9#g*nd+Qy>WSozPFoJ;4|1 zu8QTA#pWg!pKs>MPP&0v?mQj`d(7vqupYx5kAauNrJc~T)3GLr!LA8;&~ttL{B{FV z)ahWLv%)$i7C=H3T;P7kIs@iK2j6S#6|tTysn6NoHdUw!j;=bWvEadOhwMw%bD}qc zR5qgw)0Bmf4aOm{+iaQM8|HEq4qv(f41pn$+Kq8N0-!7LXcxzkA>=JZ2e5js=mk!A)|y9VCWqFK z1C5Ty8Edp}-||u~zu~{0OLFy4t-7$Yf%(1~IkQMSy&x5M+9 z#A$k<16Vdtih37k{x$$^^YCE6AgN^uB0aBVyY~|;{C%SFitts3E;j@bi|{b8GG8Oo z9Nzn#=(kN*2;xiP;LHh8Sx~xVty}jz5jEt`4UUa<Y4LB$9@6xq64|=T=}X9qR0T#q)aqA(tGtE~3r>eC5+}n0j#p|xx+tOsF|9*o z6wlZOXuJSao*o)Uww4_pR~6_)!eJWq$*A)!BY8ib;=U$QFWEv2cxhtT!x{17o>WffbCu3oZ;{>TNQtm;ACo- z6nV|l*XwmtJ z#18{lxJ)maKW`q4A0;pM|!WQ21CfdF|BfR zVv`Bmc#*(3l(NKgc|DS7iMJ6R2-!cduZGUio62EMMK4u%s%HSTQVFUpl7v|iNiD%6 
z_ui$KsR#iGAo?n~6bD=kKh&l5UX%Ou_hN}aPT%HcDZtui+G!t0lIA2f(3yiKPoPsH zNd~w8Xw5GIAnY8Z&4Aeai@P2i8kC<4dOm=e1=I7WT>;?cGV zJ#}Tao1;^6UIagmCqX!4BG)%aF8PrXeqoaHjJ9B;oM`zw8`4+6xce&X2&_=7fs_f! zi##FGyK^Kpd@!f9K_d@$<~>;(Otx}Xk<7^ZJ3<9TMVE`3gbwVA_q@ahQ{By64qKUwQd; z%kE8{ZPA#KEed}P!Y&|@t@++OF`$m6jN)GIq}6h= zW6LIG7UVTcd9X~5&_mYAlP2bfItg;X?rICbJ0o**J?wpNqW5@xah6!j$@?CaQ5&1a zU;up`Ms;v#Na($ZWz==p14^zv?&EG-yw9#-n+8mud0LK`96xqU8VCK8$B!3NzV!C$ z-Z=i8K7~S|2X`=rw0utUxaWa@Qj$v1xuleF*?kXDC`^bY zMqV?wr#bg3-iuT~LEO+Bu>iuAI7rMKfTxlSh~Az2&LhH4Qli~W@jz-vF9tX#4$XJS z&6}0L<(K1>pj4u(b_^H^7UNuY@=#6uY6W3kRj&q=qk z;|iU{o#B2?V$=_OhS{SGR6A3&$jpZRG`_R6fd&divrk`I%LH5c9=&~3{ZVG)=eD*2 z&m%ZAVbymGT4F&V`~fG|>YaT6Xi$ia^sZtgI3D?jiv1K+CG=Cgxa=J-BiElkkBQL= z8zM5AC-UQVEm#0D6LyIt9+z0NpeZIU-w}P!;(z~H7Sr1MdwR-zd4KyYq04Pz)OlBX zohuHdAc%|(0M9@aLlOL3jJ}}n%*XMW*+1Xi8k)&UG?{QUpe%2;hm5%PRFqg$7Ts>s z={OxUZ-}>;jdc@O$KZA8J^+~cy`V}ve-LpT}*0~b#RDn7~pvN5kH!f$WW!VPEU zX^n?)fts|T@fV3;Jjo+QvJ<1dRmm3X1h4Pv-YZ=Bl?qb?SW#h$i7*Yvx(lFN3*3;g zU(g`SKX{E!p>)?i;@4_oDj`AimqrOx2Ut}Z1Zs31hnsR|O_B()z6ZDwjsB6C)Xm-eJ-VMgAn1bALu^TLG9gLJJm<x zeMgsc9Gzurm7ABa_M3hI=N}bP|H@_AGjdcD*PVN+xjO2rSMS;L&{`*?OU@6r0H zf!qHZV83a+yeEPLDeV!@F`=Zc;U)-fz1E`;V1K+yRWFi%ywx z0De%2r51*Zty~ymuw)D`GB)-k$R5h4|aW|X}PDH}B zb#ch{&xmV}@(4{tsDK@6A`y+AxF4vGB`|hN>70173rD~@yR5ON+ApYN)x2@ewO2k|m8tuPntmYSc6>k50U8z^dMtFVj{Cn@*% zOQ7g4Saq%#4k|N`*${JT{&g_-LpW278p&mj8PjatWge!8bJJ#Lt z`R4c&kUz3=iZ|xvzK0^xg)Ck}ug{>xiBs9b!mwOEaQN_p>PZjqkLo{shz366aORSv zRN@&=n{Sh|d#TxJDVl8E&`}fMV{1J3P4rxi9x~Gz9%t{mUfP>-JWl8M@l~KcPe=6t z!_x-6+MQ}Rf7rmB4PLbpK%#tc-0XMGhri=XG%W-*BUc(nmv=;jr1zi&2fIe=*Eg0C z3aR&T-el%hE#qM97T|{j^2JhVX&zYm8D1wy+&{4QEHmpvtHelt34oy~%W1=!XwR|2 z$7bYoU_8xjPY7q}O{x@jfH9@?5Yd0&+dLH5o(7=r4><+Dku`s>4Em5`x$Y4F;GoJ% zL&{6`g&?f^TCw+p-N;3>YM8`Mc`}}G{td#u}tE{6~MogVs1{CmN1 z)+7;R%Lo^i`lOFk+aCPD2;KpW2b8OX$)_gbjAxxT?i?h=EX?0|%&gVG5uIQS~DW!B+*LE{Kqb0b4!Sm?tWFi3|hSvtWx+f&yUd z(FUq{32M0#{Ovtp2uSw(c=}UayLNBPGLmagr&5U3EZPo|i-6)u{GQO`Bzs7Ym*vA2 
z7GJO!hG8m;9?Pj{4hB-Sdxy|M6E7&>bDCR%76k$_C!9+(%r)_j#sQHY-`k;lSH|QsR`ZV#;@|w ze7+>H49_VdEBAS0y1LY`UbsyPl?2NMf=i}??>31QklY_k>y8cpt+4@rPlrNo*l1yklz-u7(n@R-FSPoWV+oUkZk#05XcC+_j zU}EVYrr#y7+{RN=$usECo3j4f5-d10C%u<@n2RMJFJqCpW7O^7n3kwiifDpRFwBGamnIZCz$5;9N8)PPko6lDw< zq70ECmYHR(^}pYSefBwLpa1n;-}RkqU*~jchxPlt!}HwFJ>Y)0aFL&%f4!XC@%G0X z+h1)CVxMoy*6kfmn5`AX_Iw)_JGtOK(w{UUT@>4EPf6ztAlDrO+(=9qg({si=Cd3F zN`FmsEXGw;R4Q=Xs0?!n;`O}$rl^cAr#PoW-fY7M2C`~VlXzEe3}ldlxW!RY5EdkX@8kFvu3g8@u&mWYP5BmX z2wk-)>LfiLzDlh+BWqU|l3vFQupLmpy#zc%zS)l$_@`j6e8A~HFf{ZC+f*;z>}K)U z#!F7TK-%`pe%9u}3)&4<1%}3=1hnIrIwO|fdITj48AjnJm@|7etaa_1bO<(fA3I50 zPy7@c+o%d&?|Z>WY(sz>#!Vs~>p9;VF2#Z&-#ciNWNT#k7%WD#= z#f048!%6dJC|!vk5%?O01n1`@9kV|3oDYBzoh%Vwkt?2nD?9-Td``-0ia&458+5Yw zb*u=%H!#cp0^nD#k7C3coye*QV zjXd+R-sA8Rzzln|$+ zGtK`tWy5Dx%_~j+*VqjNox`gyM{O0uA?bnRZBC&NVzr!{Cij?6ba`*69MDgp3t5Jn)0tB@iI62+`Ehcy? zN*v`O+UP0|q=L~=d1^Pg^8ZA;^@c|=_`uaH-h-D-5Ej5#?RjoJR%%FV6xO1ywSE=? z1NXNWvYlM!0d+H9mziv+~)4i>D^wiW<=-uP>v&0E?eGNFY z4Sldzl?G`W%!#FS_4S%CZ$?{s;<$t0vt#LX9#&3^jws}gR7)df_O`rOG<#q?M#BMV zJj;!Tcq-N3O@^mXMVW)35*^26@7_K(_Yc+Ye5&$pd?_v3W#WhFIo$+hW&E~V>Flr zANRbQIDFsb`iMkox8u2PrPTJpq`rcPB&J&p)oEl2A1!}KLTP|U@c^Sg$Z$tar>q#j zOA_u&yh1EPpx6--6Z2zm&Yu0{6m4>V6A=DI%!J-O*ts05r4ZnuZE-US0N||!;5DRd zDMblW{_GD^+z+vCDaW5JM2xDZudgzggkV8l2L&x5ADI4sQyBG1G%Ir)?T8;ChAORj z#N8WhKqN+>A{TW~u~fG5-bBel0+dLx2jHIcQW(S}LZS#!4 zs{aSG^7O2_<^Xv49$>12sVHOHMO0%X8>GYpz4FHwb`qd(SELv!^fcw@3$MUv!3UM| zBuLbl%JOc%@bFOka&HZmQ!o2}Q8z&dcvwlTs;orQC$?{!$yiJ{3+MpnTbQ@b1WoiL z0DEGUEzcS=S+XOHiEOnSz;Qlzgi*g5+`kM4n|M;d@r_O++0;0uug647kDAo=t4C5OJDe zKT%5ajV~%^y{cvB`wG+-Dwqs$WxCYN*H`cfoLb*d#7mZldz(|YfIQQ83wu^lb9xe2 z!Jk&X!#SBFi_%z&^ujEc)AC0ooCkS)G#ImjQVygHX66o9T!{`v0kFMdJio9|^VqRF zos&UmXm8$=Hjmh1h!aE)Hi<_sV@fyN*K8;z_jB~~tPdTlASE5-xLk_`+&2}&ACmbD zMpJLxVK9s=g23YH2RKk~!a@YJOe>k*#lb;AHz8gkVRD?P)mC7eRbt&e4J}9aRRs}& zr&?C1u@FK10#MKZussalFmg!(Ap6iVyUwtA1ovGmYag+t0%eEqJtRDUQ;OX!pKD@x zO6Fa)@usJ7P;_G8-kwWtyJ=S*C{V?Z^oj#A5B|z!&bJptJ4z;Dv@B61 
z4r8so&$d#7DR*JGKT;n9vU&o%?Y(Rf(TiFf*0){0=;&cG2t_plh zjOfKT{);$UI=o`*Szae6r^m1jM!q^fG7DbgIaRmN>kQSBtr(t3RQLvA7)uUIl!=31 z>yWoiDCJ2r1yI5LBF`E@jioa_vFbK0b42(z)wVX_xFCC5G?lMKq3|qKejHd9j>0K$ zi*MR|8`8O7oZ6cG;+LFiqNv*Z&2C-7na&x4w+-GX%oA%aA3qqzihQ42Eh#@|Ja~a| zPcRfc07H#1<{=^IcFiI9vz}50aVd2M(&SSMmB&b6e)6E4}XAIq{BuLxZo9_U9J^eeitRlMXT@qtd)7;l!!9l$_Kw!!1_R8b1`;O7w6-EjLN|#+eaLDY z0{C@oL2DW<;CS;0cf5jz{3gR3hb7p(u7DIUoVbXkDt-2&hRFjUw^JyMn7s_ODWD=h z@IY=ErmG$xu;Z{RdTS$QsFUB8lEfT@Wub<~s*k~_ZhUZ5ST`fwjD$YFqJIJOK=Ex2 zo0#wy?Ck?nUUYP5Xc`4`apXhp5)W1aaTe?u5%csqb&;SJ9I-0U8=?{N!NM{Iu7p9W z6lGR%!HqIwhE6;EpjYOE*Eg!utSFuVb)}MRHZF@AsJG41Bu;)DgJdsW^7tEhz%K2> z5`y_3`>+X#uo~ZhgF@&R^xW~U%Ri(T3gAHal80OUSs7`p=pZC}LPd38E!-SfW;Kb+ zK;YafI!d$39iIU-4H>^LJ1ZiIWc1LPH$iaga@Z;N} z2r&5~)@qQoZ>mddBin7FXt`B3$1Z!P+8J1yd03iu%(I=78!}mn*5|qfAfPnSxa@4z zIT0d}6W%QqJ`952h<4sKm)XOlbc?4&bDG?9q0SCU$zQxaY*JdRk;AjeYni0+w9#ky zX`WKq^0|!fvKhNaQN?%GrU(#3f#4IxFNDy2Qm!DHam!unnHoLwFP|p01L!=UP!nPr zxNiM)ItTacdg0BR7m_F#7`=h-N^l-%@F_1*%0I?an?&{D@GWZgxflJQjE%u5+h(N= z$EuAR$>UJb)m?^bD@AgC7>bSSnd&McwF>`Yl~X_foQ9?sDs}I|p?m8V3!-U2Mx!67 zJSP!C#w%^M07t}3oGGCtwM$aRmshfY=V@GCceL)1VymllRfN#QL`*Krq-Ugd4Xjdj zqD8+-qWwV21DbmHre&IdhP?bpS2ZV&sMk=0mSYrzNMSFAQGqaztUTwi#JN^|O)Kp~im$VCyCthBV&mJQ3~>^FhA3SjOk zoQ-D@u2+-u=qHi8R8eFZ&x2|JBo*`9xIkys@7ah+INxh`a`y_3Rq`xe{28pc5UVM~ zfB?*}?I#fX%yFA>sh})~aaEA1q|dHOPmMF5EI&P&FiT${7^hvz_X%Au4bXR;y2FAG zwh!MA@BSlqsPI^r*WEDp9wdO!v6cpT;@P-Ls`Iuar4&r~&!#xRl^C+Hsxg>$8JZnf zuZ%>sdhzE_Yr=)K9Fz{U0s1LU7&}6TZ)>X^xZ(9}p9?3JZNIRJ;Ou}^(RLZ3D4^um z71h-(!{-1DUx1&UsExson@VAgwQ>>e2M{+*e%jXEN8@RN9dgZKgI&aJ+nF!LW{5O= zK~}LUFh?||Lr<=v&g%=E>4fU5yFuQ#D-4n#+(!hOL}9cYe7HWblc0l99#(0)6Luyj zlURiXCDF5~gyHW;hRnD^R)sqBzkWTn!`g@~Yd5AGoqaoq6&tlLdnMVxSl9w2k zw@?-lX4|n0izPpZouVh|dt&nOH*sN-md76eC!r&(7ZF)_+h;|X`$TXi{a~yMuy@-nn%D&Bt`)KDJ43QuBt^$IAqzS#P4;u(LYOY(!^G|3M2@QJ~< z8A;Uq(y+ou0~7Ddhj$n{m?V|CeKidqUfVoYP)nQp-Y|@=UNxZ?>$FzYQSQ-Z#A%6S z?Sjqb!=HnhtHom1pIiS8*x!sG128v*q@)60oYUk%6ONHX%2M$fd91NLsITDbpmpf9 
zpSO1&#;#BMsLWv=a>oL9ia#D&ejT?Eu14bH`+eyb1@`13yeYGfkHd@q*bR~kN%z9( zFMQl+=jY#79Rlo?@O9A`?$}fI?wvCF4>Yq|zTQP`gr+3t0q1lI=o%oV!?xnpK!%uf zLBxKhbO!CmXk;33{{~?6?Rahg8(iAu?-hpW0!j^R96oUK4)jGp80M3be|$ZVQT4u{ z1!F1j=`m*XvoecY&m+s4_2U>hEmEr<%bxwWQVVIKCe2;E0$T*zs7tsl6@HCuaGG90 z{p`XV*7-ii8aa%_8e_rDdagReRe;$|lm{2=>}FEnGOT;NJ>#Lq`lr*w#0OIb+-%!d zPhE2Ofg$2kX5a&fH)2+Vr$f};eV-(pM!u97&XOEyO{1!$SBFXDVDf?X<^sJ%CEcQW z&aUx^vWkks-AlHgOkA4Wb2RK2ka4QuJ4Nns_x4~*iC$psX*pE;cBAKtC*9Q)Ko z%EnpzcGI!$H1E#M3OcF{&`eXm?Q5SLzATp?&}v(597VSs7w@+KOA2ShSG)NSq0tBpri)n9WZ^(mF+>i31Iq$Q^3I>-qiy3O4{3 znrHWx{tM0r$BAo69gbppPn{Gv!n?(TiP{JdaKsFVr#Om3jd9jse4%l9#ch0OyOT@% zX`x%8@#OMh!1t%4CCC`Vy%lmoR8vFa3K`Tq2BO~8ww)X@gBkt-E>6RkC|!dJ z#$ojP`=Ex$MoPzYbxFD0xGVHOr$-A;SW%!7P{DzP>$&!lIoqKJGO@H5n6r7{;De zXLy#;-6DXiZ8vf&$j~xDM}kt{Yie%p^-q$Jw{_ZT;%6qgy9}ReJj1bMbvobI(22F- z(qvT)HUCDlljk=D<4vcWK3wNz%o^trFz(5^)T@W~idyan^D zFDdyIt#gO{Q@^UqrzbV%CzoXit|Nt&qYNN*JT;Ya+_15f))xga0@C<(5tCvPd}iI$dE&BiCNp5n~kD>{C#AT(G@!z zHLExmrpZ7(V{m#Qb2Z_#^tQ6{R%2tM8NOTyNz*VHUozPCT7#_> z;m_u+$Fn<_;v^*G0Ji-4?c48oy7g`Yk=LT~(QMew-UI`59`B}IQQsA!pXXM_ zTQkS%vxMpmmaSX2GmJVXibpng_N&oisc_cnGpzC=n!CZ}eQVANq_U#=L|0(ac>qQE zI`Xy9{UDiK^Ww$SeIec{?=bP(dN52rG%)JXBMmadsj3PDrP&-^q)4|bAdd;6Gw6eL zcsP2JMchylmVq#^TPxNJa5RjYD zkj)d24{>a_KvR#l%G=YEG^NR5NcOG3TG1*lw_~pQfz;lit9#`#U6aDxanv#!Jk;1^ zNgDU8KEr$i?T(Vc<@j^Vb!VJQb#leOxtG_tC=$@40PZKu65er3rFVT2xdWmxJUQlv z7ZLICmUH#D+pM3}Y=r1-9 ze>M2!IgONsDUM9uE&fOM+_+%k=JrYa&F}F__ENL#suur;t3o)rYVWG%h+Sm zn)a%r^i}-ssTp$bj-TMR8CW9|e0vqXLC}DJ!1gP&)>gXo7VY#y#&q5t3;lFbuG_X2 z>a}XwGM@xX6uIUr%gO#R<5@%2YSZqcz^u+TdjHm`VIj2!DKHdl*iqmE$`qNcg6_C)P_fzvkyF4s1|kQgVrQR=-2b-XHE8Qdn0a;f43%yuG5^m zDK&lZp)o@~*i44VTvFEaU%9SJSEU3$iIAiozm+iqT6*NgG7zKe!rn` z&Glx^*=1e%h4E1-vPM%_x*2n*>F4Jy;)J_&nrh6z@LG0{E*|o#w!G=-jWmJeosNpe z5#RYFqFffvPF%j_+WxHb>$g*zVuw%tCELS3bB`@jTC;GMaiKua;%T6cR`Sd_u6NA4 z(v-6AYJ`5vVmiOV%C%YNzZAR8K7NcPK=s@|_p@KB$;ERMHgd9;7g8?#!E%yD)<>-f zKWpq{t@vVt=US{ClH4m-tKP0v7g1sQ%PZ=Qet*O0<9MXIXx+oAG}Ffjc^+hps(kBT 
zCCTj`EHRbxBv?$T+oUL9$9aCr!%H^Om5#&Ldh({f5kG!KKTO`jZnfORuuj|gW_X*( zo6OLQiq8A+iUW_$E7MgTVO}kHS2#AMoLFvKctfSNI^im^`$Da9Uw!JM?a?)8+8-Cz zF-AAbcG!aSucuk&i;|rO_6F75|Jz`7%ktNTmqo_=6c#PIqZYOPdCbc+n}_`(8up7; z-R$_IUfS|{Hv9bcPVWz;^UuJv=S21!_qUrjpX}(}2ir?5CCg;zy-?m&D#l{mKVl)`KKX2#yUwuVu=$s$ z{TKS0Y1x~^6h3?_;M6eRa6lwyLd!toN@bD1QA75Q0rY;9lihv(PqO8{SIm&#_asd) z`0eM9$ty<3uE~GgYE|l-X(=2hw6*z%0=t8Lbc zO-+5o*#OJ!lRX-r8!6etBe3S>)%9>%sN4VNs?m@0eV4GL-LmZ2+1+_E2w;TVu)?b$^M*m zX5QI*Tis8bT*W7TB7ULA;yL8-##SF=8Y@@bS4OetAxwKiT~f;@2Y{ zP1_-R>7`|tmfC@XM!nrWdag6#qU?IBIWr<6jk^1|npJ$rwi~!Mg>nT8^^%z0%jbtz zQ?8t=ihgWaF=gG?uQK0aXUEK6L7{ZiXBE{2ENEdE=6yp*hsI^=c`loy7SuD=Xd(4SHJuF%cz_U zjd^EJR+Z1ul$!l8z_oavpv1<4$VGF$yi4zIc{MepuYSoqoCWt4*vh&FE5BRSMJ$!* zcI7HRJ9y>;^Yc`>ai(2JU*&Yl1(WZ`?>u`hbb;-%OXwB6V){ozc0`oW#_LS4Xw9Kj zCm4PGdi_?u5Q*H4~yVscg=%H`!I9b%I&1& z*=_yo(9Dm2-F?3IeCk^TY5ICcJyv2<8ZGE(c1U=x{k!OJ9I*V;_3b-{W5^ zU3{TXX2ic%KK|03dUp*C@B#Y{lx<|&*c>rD2CQ#8VM8oJ7f3z0PbMx?c4OKE^_|-|JmP!cVCb%=4r?j&~ z{&nsxQEZJaz^DWBG)V{=#S;-rdJIzeQD@9;VARo?OS7j>!H@z_j|aL4%q(ugEb3Mt z*p!lFCUbvlFT3u==;QUJa)zIqXWW}{PCR1s?%nIWuU!k~nAW$Q!>!tvUiaC3`X0Z# zcX+`Co9{ut#D78k_?p9khu58nd~6_O`thOe2i`6D+TyON9J;Q%^H)uYjGZ-S9&OMg zeEvZ@oh3F?4P3%H=&3K79lDu0SzX^Fy3T>UG!urmU@iD34sbk-j-CPIlq*p7fcSm^ z;|sxFOf&P^OD4{9&7LzS50f@}y=4j%?@Iy3))Y&`OlE?Rya|L#u-J8uGSmv18ykH= z8UV2HsW0;p0yi_!NQ_9b`+v*yiHK}<`JjU;ANu2@g!8=nehM^~7Y)2P%* zWHw0cW#T6Xh(^>eNQ1@qh;l5%vyJ^lL3tD=oCc0v$vz=|V|#zkCl6Z=<$wWZTOO#Ug8>*dE~ zGNE1y0wfIxxH~#ow4_99fI+VbWN))zt9ax{U?MNC3H(lZyG0z-^@z>@@BrYefSgET zhlMd9L9<~xfG`!;uV2rE_F`%?Eoh%R7*UV|?(J};9UCWBOnCStsf-LvAahXEgW$mv zfl9Myn8=|)1g-okOd!EA5|WoU=)0sj+>8hz&}A+_kPc*wcVo)V)UnpqRx;H=P~k%G z4v1Ry>`L%8eff7R1QG;b?AIyziYqlD<>R3#^5v);w zb_Xd!ikMXqYcash-EBO}o1=|8#{{*hajk(NBiq)=$I*=ru;1Pl&>YM{h!qAFkll`q+pF$VfoK zRs%{=>6Mj)OrpxFsB7>op0W!V+LBNH{2;DMLs$J}Jl|`PIlZMIZx9X%0h)1-@Gt3p z)1*5?4I~Y`@yj?;;5sp5+4_8C?>K`|6%*!C|Ha9AVoxH}WW$m0SvYP_91EUgW$`@l z@iW}fy2tw%TUuJ0d@;OzL70b!K#k$vVclWc%_}A$5xhi4|N0F9>n;gfr(0Dl<3at6 
z@{)j#zue*O_L>2rP=)Dr`WaA3N3W^0Jjotd~A(%3Qt12M6(?y*vEkbgc zsj07kHp*zRKt#TR$#ovge}K6?DLHp~JqyNaAF8W~`$k^U3gG9fgaAi!P_W@7jq=>) z2b+sPprWd#=J`0l_|&OKHNAqAO>m9+LdU|b`0(LT#^(sF=XXGkAd4N(Tm_!nn0v`O zt$hV%?`+U9h}76`TT`co!G^-2G9ZYDj~sCYd0|cpc2js}=21&uK0b2F@L!*Z!744@Q<`0)TQIo)bth5B(pP!rv$5H~Kkg-YaK6p^5P-I5ZyZ+|>1hkG@9 zeF&qBB$5NOU;HAk03AD!6BaTV*lDo~C^u3bN>_=ZcLdFyP8^?cs z(qlRm6H9NyI0)PSM7Kn85*SYDtj#(G2FYoDn5SSE<`4B{_F$<6Cj0Mgk>wT0$NbA z5regKap7PgZFvf4NS|R!hBvkgYz+*mPhx4mm}^pEwFLfEV8K9Ir?Gwas%m*-Iy6!y zpf{5FC>B4-&_-V z@odMV2%R(@OiNCl57Js5P78`0vyy0)5QK1PePL;hVb|E2YL@laEZ$Ul#ZA_IXihOz zj?S8^G^q4_GN`hyb>@%z+(gy-Ox~qX(hY5`?%VZ0K`4j;s~HC8PwMxGxcI3mVGJjXNh8eG!1dV#J#OgH zxj?F=k=RT4-8u%B-HDWLktqOcOv||`IhK%z#p0J!vH zAYtGG7dAj1RjFR8@pN$fppl6{Ayfgm&8e6YKP>oL*XJ%1lKp<8t4A?Jd0?VQgNghF zGu)-8OiYw4GVURL7XYS*;`0w8cY^nxfbrnA)vH~>we$2e!`v(# zj60zpmR)Owg)Mty(u%l#-r_R*cPl@tFe7I;u_?w*{N&Qoc3Oh1LcAs1+}({xZ0f#X zWIN0vE`gxg`_*$*orz}|?eyvR`Dz_2j!|A>>O}|j%mjqEoeT3z4CDik!bcbG44=XF z2z{O&n#!}R?B_QfiFiVJz`ETM#u|!Q$=PyRB5W@w%4HmhL_IZkrQFk)*G4^e^9Ef} z2b%ci#4c8zyno3z=M9{O#O~$786zo7s`q@EgA*wfVTu3XwXoRFJ&vU|a$s{{liQC! 
zApe!fu$_2RfO)kfR@s+v6p*QKk_ns^WyL=6{sWz6_Q7HcVl43wmY}0d$jaL4R<=s* zr@{L_zr=`>LwIsJXpOFl;ho<$8K|MqHZwEBFAuDPjVsQN-pm0oz=>HQamNS22B(zS z`Sag12M%8C;Q6`u6JOA~F7-rJq1*>GVwJpnZbPxng$uht2*ns$Lr>2e35QBin)H;K zO+R1EompS@`4FuJi5S7$pLkK>oI%lQ^!naCz5`+MD?n5N)2jfJ(no1&?5g6~{_0cb}V!8S9gQ5-&p$DvmQa zhE2h~i>OEp;=nt*x>!zlas1S&+7%L8Xx z-{|Odq1HGH%kjWqamblsJ3NMo=oTEiM5KxOLJ&6z97B`d*3VqfM~!1iUUJ#nYj}w9 z>%|{))XVbtw>le#aZtcgaH!(rMcUfhe$dZz9J5MGiS-7a{%S~HK*oOcZ?N)D9Wfjr1z$MBLd%V83tU{J1+xGaRo6hwU|_ zdnb-1AcX+WgJ=%$^TB@IesV0eXUsP#Olw?o7<7_7kblBd)a}uuHQ++efN%sd&TEHs z;e)ydnb4(WW|xY_goR+~HV;-Ds1mp8GSWAHU~4iVFTGVy3mN&jLVYDy4Ec~k8*g1d ztP;~>wKy$JC8eoIK>n|i7X7I-E}JixG0(?WJq(8b-kWLc5y*SZP;ly{w154od0)iK zB8(PE&ngpGv`7gyFb-MOX3~$2CTxI}sD1BWHM`=`?EkE&YwOnu30;F?_f&6+^CYp^ zfe^*(tGSukDhiRTB-I`~tPnR)&=q#`=57=u%E}NZ%jjVxa&x0*IBa1NU*m#3zX!)F zp?MEG*!i+CNxz9pmE$2ko2H3Lq*mIjwjuv;R^I`(%_%G%n$+(-U+C$Xo}3QBs%O7F z*llJO7I~1_!_nsk)}_6J!<=s*R>}zPQuXB(B!uAYdU|>n2EQJByGYo2gbG3x?3!L; z!$Ea{{!u1to9E3y!;M%axLgassQ!Y&1ODo8UOlP?p0yG-xhpB z{DgnC2elhw5{lUH1|($$3}8qJ3VomZml@`JP-t~icQ8%fkwwt>dl96Lq#x2VBju~| zn(UepYlkS!xU{rP+Q#yGDXVE)S7j2bIAT19!VxaV+b>X1Pd^zI6Q&)T)ExiEvbu(4 z_38QA<9f{#Wgo+w(b40~aL9svaa*VN#NL~HhUbd*Q>fogfeJG9q&uEL2r_*!an|Dk}P)Bw6|y>FE!1a}6%_SWgr!_|JyMpEJl>P zh#~n5Bmu5O#0nRZnt50_#7h*u>g8tAL=B7@!uHaiz$9TDZX*O1 z^hzY+r`dTl^ums8P<{x8pq+435L6>L#uO0;a0J;da?Ks#xbn|6rm3uRTRt0}o8gIx zb5VjSL8A#O5XpfsJ7)r=HOxqfj0siig7fb;sW2ZEZ;NJ8F0fh7gf|ZOVn)`tcsdnq zzU<)|eZVezE?wFRPPPXo^@JDpq&2uo;kp_~EQ9+k*wzjHzitAXbVMEhV}0gUAb|wz z^{_BRx3M2G^mK@+L)~Wq)r9QY9|j7L{~kGgI;vEN50&=Pr3cUAE91N)EEOn1r6HB^ z_Vxy89f+}yiidR_ijG>eIn-5hjCI9jNrWDm@N0!YuW;rWLAaxOWka59CB%o}IsH@SOIAw!LFCFrnYymGYP|pI?n= zXi(UqydL`eYN{*UCf(UHpJ5nJ^$j)DWCU9%{H#oPtM=?38gdvUrqEzBWB;MTTkm`K z4j?O-;ygxu1{}Ei)Ab^SUsuac$9Ipm8TE``0!B*A`2H$3_>Ur|9x>4KG<9Key@r-$$PKRj*)X zS-0=22#Y$ncke48Q)IsqfN-5y!^er8oUII9(^x8mtwdk9f%=w*92qUmqIf`U~2a7#ZN(_OJ-Zb8`;QfwHP%VjzASq?BG)GD6?^7 z;mtpswpDjjC7*o9SX8D^c+9Sx(JhELZ<5pj&iV`4m60W3a0&*$zQUwk>SAmx^!jx%($T4zTn;x3pZtM<>!EG>6nJ-aC&x7%o!?Eg1BdtXVN+ 
zV2&}ub%4f%qHUkVY<~?ju)n{5!GmpEw-N&k;_IrHbRAMVaf9sGjAqdJ!BsNC#G|t+ zh6|ty2~E>&pSlYdGZM@xID(11+1Y-lb(?2@?PllTW?TeDAU@moX4DdU6O?z$C;Kg9 z|DOT>tFcjuhe>EHjX9$KrNmiJj}Oeches2LUkR%6$mHZhACD$dpUb%H_)l7yvkGTbLVyFtxKB;$UYCW zRFc(%$}T6^V}*>(ERmo&u{cm{W9k5=YX8w_xTS-fx_}!?cWxmn&E3T822%m> zJEiT{x@$4vKm_64+z`3T3&H4}QDUjSk@b&F#Uljqt;W~_0xxBFe*knMfd{WO@ zm|ajPLQ}ZE?W`zYhHGw&(acUBbii~D_%w$cb5{mV4PBpv#fc--roL-=UkWa zTlWmDTLSsm#1((<-rb1oLZ1zfm9>w&$O_tzRu7&ogu#kKfU~_1_R`&M2OFrpeXW-+ zTv!nL2iVTnVKVy4Mk&8u=MeKig?y-=I?luCb4UHPm~HvOwoaC7t7%wMc^6ZH8tGDa zn^=#_3B(FWY~G*rLFBziNi6~xC}ir=>Glv4$rm9%gc9BI>BR)V3REIFm8}1o6q( ztOxizJ7t-y3p&L;6f%^8nrk-Y?*02!)X5^MjSMTZw+bJ9U(_u)I=c_yQ$irk^MLxF zqoP0SL@M(q3=cV3H&Y#-O#WxTnfS3SfHD3mAUT=3r`1wc{(sJUg0`@L>=owHJe^sR z?eqd(>n4FA{8EKca+%HJNX6{yJH5JmGB94i#P@B~A5jFbg)<>i^%(O*Cy7N*X_&(A z92FF24q1VEjhcy*a95kAw;~J-qkOp&1XIqY410;82@_ z(KN~}q}z5KZ>pqaTKBXq=&&R z11^NkdhuN>dPzX?N_Ju4OwUn?lTWFXZxdbx!X0DlYN28z*%WYTYJoI$`pJ9rsT3-+ z+Ym%;lKb^V9eMY8uV0@o!tQ{09^L;n7|dxLWw=Xsz+?n2NynibMO%d)WZSr*_q<_R z$QY%ynfl`SBmt%r6a=vTw_j|TmOIwMS(nOD*~euqui*KuYr>~M_-~V{i%LqaiFApM zu~Y9DJ|8;-j;t_z-ZkjTUzkqaBOhjSy|Fdn*dJ2@46wqY!U@-q@?Z1DLZUXXD9ftW$aX>Zt$Kh}Z!u7OfnC=ad9^PZ+Zl;mOB zfb#eu+DrrpbX{;TP4BZyrJtMoY;gZZR1YZ?W;QFc&YO6%&QV`zcjwym)=x(tJlafB zQs36w0bmCi;VR%1r%8Iz3UAI%Cr0MxOy>i(PD@tT(5`%8aXji47M4H4-we^8I|u_r zR6AB;$P&sv(=*0#1;*A(Xyfm#`r+x-3SNjFg5e7!V%QCtDVlK~_Mvc8Mz+KJm2#dW zAP|E{NGbPu)T{2>GeL89xBf|l+c zdR(u3ejzPnyt%qh0_TTA%b@GxXT4Tx-D3fR;~y40E2=GhN4rrfSopEzVD}M1b2p$e zZIh&0ehlDBoiOw6K8qj?f+Lt9!3hk4*C=2&!T-DNm1htaNnFfpZD}zk`t<5*?Gcur zSThQJv~I*1gOpe#5t)P;P%ihHtY>lAriK6J0>HotxGrXLoG5y#^fN)w2VlBOd4|IJ z%%&2&fT83fqaW}?-Iufmv$hlbx42jGELrjj+ZsLb6Gm1V z6N&{nXi4{4>Ft?#2S6OnnBK5i3ZR#lRl^+hfif3VsT zFmGG$AICEx)pPm{Ml_#75^f{J3wfdcn5 zh7vKl11V2duEqf1}LU^RrcXZ-?M&hF}H7HI2=8`(C()Wvj4Mt<2KOK4;q zKM9YCf&wpNykJ4x#M{g4;6ax0dR8gFp{x{}*W`eAe_ZGP&E(MT_W$wZaLnhb7Q?f^ z3MUu>K#0hfFiU`KT!e!Vs@vI@n;rXh;+8f7odvo|MI5>EW1pucF}e7pXJOOAj8F;b z$)&~b;0jD4=t$TcMvU`M$aDfShtzv661ZVWnr|BBysnY25?3+7>ERALqkm}Lk?$#I zY}T+IAR 
z>`R$!h*Uj*;dC-z)c00(X<#uQikjii2B`BBX@%bMY3Cte1+$(2I7Z-bS-nX z-XoyIl-MS3f)hA?65Jo+d8G)$T{7VW29sz=TM4*bIzHz>QcQjG$5GQAgF2=3w|7Q) zQp*&g<#Kr=6Xa=c;=(>KXdz`W#(h@CMYSELS(1eYlq4U0Cs-T7c*ybsoutVghC9LV zw<-^uMtd;l(c>KNX~^=2Bc2L+V`2xj|IdvTx~Db(zCN0GEZmN***^t!DbXsHH-;L}<2xrAO`hnlPRd#`6kH9J%QYhD$k~Dhb5_ z76tXTFPI@QOO)_h4kEz>!8@HnItJlbE}i*zZ{Jo1iDTNXzPw|9AwqeduicrLY?q*h zgY0(%^PMeQwh&oPFRE=ds6p4DytKY_DH@H-5ho|PdHmZo)PlPNp3&{LC~;n_sjjX7 z;pc5(;dRvRDypiLAZuh~axZVTUZ&@H#KL7SZZj5a1k`{}tpbcf=BkjmJpZHPMu2!V zEots>Scv<}14gh}khZMZ==$vkh&J7Fe<|JP%>}0KIe{bmh6P>1)ih1!YHfwMu{QqZ zHiVO`EXdEd*&cQWQ(R1klZGci2_o>E@|qP`5s1e2Zo>61+@M z>H3{?7v)N(#1gzq9rZt9Y~XwkfuHzb0RVT&gT#QU6O)HR>AOo!{0&chVimq?Mu#;( zrwa;2;1QSKTDu=3Q%v6TA{sPX4J}yrTbL?n1>vJgM<-pjqbT7kS`*kiaK6Fs`Ic zRHrz|gqRlkl+%DWfkpQcpSvNh;Aq%P9H4iQ1cA+Xwvstw!ydI6-|5%Qwyq76EWAU1 zNBNAi+@AQ|Vhpu%HkqiVQYTvlh#@~|bWgnrIKR_~E;JrY9p@pjYb50sP;AbF`5t%G z0AjhI{CN%eHg?_46*t~iBUK*AE>Tgax;`a-;3AHFg9?&EQhZ#H6m`Tu^Vdm1uA z5D6>puQO$NwNq>8boU=$HTAn`MHR=UnooenaLLaHXlJK?7j3(mn%WiU^I^u83M-yi zt@w`bI+nz50>)iK$6S75njcG4o>m$o8IA~EF&~O zLI;t8(|RSS)SO*Leb_2 zcm&{sNmKS^;2`-h>@Qv(jIpPtIaYf(?&krn13ya7936*T5yu>(6*$rGn3WfWsCxC(tqG$-B2KxRB z7D_5%|0AWeXQb|bU@HI4Zk-KD=93n5ysD^eN%kOqK4K@2^ZQAwRHln&`~GU${~~V6 zoF*Ic2b;D$w>(z1P=Wgk>?Y9fx@BeAWo2+tNN6dXes=+^!D*S1EPXTH@HcH%CiU&F zr@Au!C5jHnF$aZ`vux^=Da1`Q zZxEzS5{`t@;g*moyBF%!a@6n{1|{#`M{U(!SS^nZ*(yw7-MZ_5#)r-)RG6tH9X*1v zTYo)HQ?+mn4HXslM0>_?_a1l~KkIjb>KySkDH#Wqm8XG5__p4VF$VCA5J4BdC(Zj0 z+(IwqegqzpPqhNLF>u)S@-%~+OZv%IQ-p*_h!Z$+Nz4#gyBoOgz>rn%9{$eDMt=jp z2B|5#@Yx=rQi(gm&0B@h3Rr_fpoh&VQ-F&apMr}Vx9wTh+BIu5PM&<&#M|~xL22C2 z3uW|JfUJwGwUg1$ndRNKED6J0-++JxpeMt5YZYY39V0U&tp|JxB!Z}@kqD^^;p&7K z4Vmv8b5!)P2??*^&)~kl1UbvQmEnXR=*z^MFc!RIP}MW)V=^Gz0v*j$W ztQ2x={`EVU>{<~~QEs@LVvJzf%f>067uBA`_GRkUj`U(c0vfDIgZho+*f6Zv z>PwSNPcKeIHph}5WmyBD0UXV|kSfD&>@nK*^SW+n=f*+$R9N~U^ZXSXri0?1mOkH1 zMyD6O$47 zK))nv0$3s0tch+t9j)~06|FRFEGIDgPZu=>^{&ox(VY8UVMBCRbLlNCQT<=zy<;8b zq3Oim09mb9fUB&6eI#+_!g-F0a}zR4aEmKORuC2uu%wzgUBp!rV_Eo{!}<^=%X86e 
z>NOCC61aC&4hJpR|3O_$XNjol{~O^^Vsi79#>wj~!GTmP$L&bTA~_#$=Wqgwnb$so z@dA;+06=z!6$SCb1o)u@m&HSKC+cTX0Dif_?(j2s&~Hjh3qev#MH>^^I|P^mWsV0} z&BTWattIi(o+;q;FvIWP`IS%QnirD^{hf)aFR`Ff?fMMB4qo0EolwXIm9M`>---;o z!4?EhRDeW*0PHQkK@)A*bDsH#_a4=nw}DoOt?u{NbrYz^lXVB``lMs2*FeOB#YzQ6 z@SUKgf@wsYbI6AAqv^1$WEfEAY z@d5!lbcq4J_dnJ7UY?#0fu(h}1vR+*14%Ls9z*;C{r#&jvCMcFLFf&Ji7Ytaw6(TE zCIP#_ho~+9Bl|9sT95QQ;KZ=r9vB|3))zlqdxH&hy9!JOGh{xUGB@7~=l}u(6&#A2 zc>OmtcyxYK{1b?E-XLB^;ENY8dM2PLSVRe>0+G{`qBtR^$nF@cx7OKY5yxXB1bk{%E_h?Y*S51?6^*L!?TT+r+&WPlv-s z508h5Y1A{>y|L3Do_YAJ>Gqf0?2Tv7(fcdcHp)8lDP^7O{&wL&^0)6NZcozv?KKVN zC+HZuGl~Qv+uts_4c8F7m7kXfltF?|YSTO0=gc)YKYKJtTw|nb9_8VmXVyFFw^ToK z-XvbIvXv`VQoef5q7mJbdGb;RK3ZMpNNbyE(%msLEX|azr!;k)j@HRe8Y`hE`)iJa z5daY*i*sTwjL#f$w6Q5)a~wIpH8nd;9rahZUY6C|lEt<15fV;xSo{_C58}*^^EOl9CFnHcJ`R(P#n5J+)l4@d?Z9kuk~1fq|1d z?H`xQ-$oQs50$;mz;Tin-y>3+rEnXd5PurEX5*NWU})fSq*X=-6lr_ZaK zu13iiz;D&jQ~dvEd-JfE|F(U6vZhdpq>w}@t&*i^qK%4nEhI(S5D`i>i54o+o~@#^ z7bz4P(xQdZN|v zyv9@qXY8v_`#2O&&a8UFaguw2`;_I_0M9_UgyES^t{g9~p;s9e@(**u-i`;o6y6YD zU7z}7lb7=Fg$p(ty|>JAX->?0lZ~P536q6y#|A>9O0}kxH|*Il>2^e#!TxdP$(C~) zTuZO`ePel@O}bYhD?-_SDNt?Op;hEBe=AU)yGw6jt^d)M`AK)hT&JcC###depB%ch z+dJB>^7zm8&};3z+=`K0KUB_~PMLb^arFjwWH^7RRbg<;QtB1&Z2Ek^*dH}rQ4Gn2 zT}np;8UFrlHr|DO$~!_tAa&|&^G&CXPhZ~h?9#TiW^Dt;$#;GQ^4&*!xrdn)7?RBh zTY!DN!oJU(qECDCUD$8hVs&-9&e^5p;)ST?CugkARFL~huje4ka<5w2e0VV#_&%;j!-k%rwD9tjD zjPlp^^V1g***qu0yx;0Z z7kQPLPcI#9%H|Fg#=GN6TCnHs+xxC3&TmiE_$()(NBL^F?fh<5qg?Te zI{rDx_A#F)wCd@Up`#;by#sH`y8|OMmVH2-@Y=P8LL#B;WwK7QUq zSDDSTvh1Vq%xl-WRE`SGk~UxQ^oMP`_SR%fw#Ww=Dq&cG-M{R54MPwY}3jkfI@ z537)S%v|zk)AhNF_3Cg>icQM^y)UwNPE`+gK4H;WAS4@`HMO4N zK6h@@o4C;P1J9=GYHLT7mbIVOxqR$ajnb%d!MVk|&J47*@=#w?Rd5|cz4V>VGDS}# zc4_RWidJHeT*><(yEU}z1a*$r4z$0Q;JLT23u?C~P^zTodTyTVREf*?+pNa5rJylC z?ue&}Ip^dheogueql028sXKeKTVB+qi(h9A@uX;9S-9W$oclK8y=y-;ulRPtV#)R1 zo&!{O*JRtp6-SJ{2l&L)gyt=8Dex=n?2NxBZcb;UUCkP(WquLUnqrqNUmpMM@cM|z za#?nHHMQOFgu?Pgi`&aH_vX}{o41UsTCVU##AU8$3;53G(3obP18u&&pxtL=^j%{} 
zR#hEnOpop>2nrE7SQc=#OpUpBiBOztnVIfEF-m{9nCo!mQ5wZyo*<>chhx^kvtR1| zelc&Emj@nq#^s$}^fw z)8mmqgQI-P>Fv^6Q%vt)DpO8B_NBm8K|mxFr$EirpeW_VzD$4c{K!#9QQK-e<49@n z`Pd}80NeUxeKpLDTq+iJToDfVC@ERAxa&tsTxYhoA0PBZ>4yuo{nFBTTjcsErrt-%{bhhf2U;P&HoE%(n#M3i(*vxZm$v(48U&ub|y?-rZ*_UvM zzG-Y)teULPT}?YVwTbfeQ+oCtOd#i%eE3pLW2R?(lrKp0mYFwCe&Dn4a+NdN@uRP& zr5*oXDQ7rbnJMjx3kECCG#0JiA9WO78mkqTS$p!A{jILw-K?M}X zo*DA;dDVRjW2lA9%5N`i-M0VsS*_IvvOE}1l9o8Q&u!gvE&AHPua>(#ulYXd>GX6P z6ch*Tr%KA5W5zAsaIo|RyG`?^k(8g3s;}~W*|~qunGbhPp5t^}KS&CcIOWyXuU)6? z`dmkonRhl--(0@b3!_h-l7wo-l^;OT7Fz<)x%G{$f^c~@^Y z@85b^{2pEqlAIm?MaH~s&9#!H@z1FalsNQR+!njP9}C6W;r~s|q)$~PN)nPLd_{Xs zd1KtL=wGqh9P-B>SIWAE?f=1NCxYGl{{y>_i^iyn^H5&@Pp6>Zk)2B>aCjN?5HimM z{%}XRs*R0|zP7fCZgDwsq!_aWM38af!=mQ{q1w=y4#YhWZl564yaOj20bnGNHm72? z&;Qc%_~nA!#rc@p^>phhh_Mg@QRbZed=?_{;8AS>$Z_~1@|0hu7~Vbp>xY#^zt|$$ zziW4NB?{5B&6qI*Lpvc7T?zb;g93^g(iV#%MKk+(g@l~sSyu|q{UQ-d8~}fy1G0gV z(K4DHRN``Z99cIP;qNN)<;xeq!P=O^q@}0(jr<2e63>INf$rh|h~Yy2Be`q|n}IQV zQX5c^3m{wqaRcjw_$K4aIdZZ&a9rm}`Y-$R|Wr@(yb-eoyx z5I`wO$;$G#%q&H;%EjESE*}T28UJtr9556^*1>F8e80QI!|THSPqel1jfv^KptXU# zN=E8aQ=a76z*ZlM-9@#Pm6g}v8Vv5QD14|f!`TLcKHdsN&BMEVv-hP2Ybn(S6Kf?@ z|E&1^mi2F;aR4k^2NrVfexA8=6Oj_sI)O2H`RC78XP5uc{PdqmUj0YPqw!e5#2Yz? 
zV4G{7y&V~;i7~aw-o5F0RuI9>oHeVbZzGs*aD29Eosfw0+zx$Zw3RphY!|3tQ2%RX zWo4a$$mSL^C@<7}N+^U(C4q6y=>}*36k;3b(IC8^Xx9SiK4SN?qtGCQ-IiShNj@*7 zz+*J6%X_5rVQy|BUYI)@Z5CYuJ_;*WS`1ZQ(y#gtb^!kq!G6Lka&2m;F1&M~IS)}N zRh$%=eNV0m2@BhmhzEQca@k?l1g+CwB>Pg}n*S4FwrDrT>fJ+29Sia62UM_;{zZ&KfE2$dFW-4OKXY!0^`1Tc1qI6Fta##91Wyl*i$KLFYSER-{0^v;#O6ZMZD?U}3GBeW z7P_@P6*8k)7x?}^%lUgXAm{JZ+M?V1cv;OTAw@#M-|lcuB@}$3a+kjG?w;XgOP9){ zNPwNi0dQ`X?Z1c)l&S?V7Q+MLRDA1H>X2O0?~e&jS3<4Q7koj%sBla%v{>ZR;ZvjTP~gy>V`jAMJza)?5Adm1%IS^t-U|5ZuJOr4|!pyh5a zDD+zqvW&_Oeg5;x<(_8N_!&Wu0FL^=m3MqEznrD>E+xjt_R?irmrGa2>+N^>+HQ<` zZ+q#I`CV}*X{uP4`d!0%^E-xj)|jtcAYAOfX@%5%8g1~Ks()<$uepkx;@=+%I4ju4 zo2z`>wWuzabg|*Od$@Xpdy_Q4dp@1)u>M8cDoN$u%Iw$irM164pg=E z!(qO8UOFLQ(^cPzGiTd1guZG$zG>FvbE)-6|{VXEH&#ick<}YsvQkn-wik%`w)Gsg%$4B+_XT zB!ILyseL}WAGGyou)@Z;-v%}&gv~`<-N6;%!Hthf6!s5z!-Uy|0CQjDjJCs7)v%Hd zwj07*i8~TBdvczexB0;i4;Ijuf!wx(#Zi?#8Cet5$Kk3NS53 z!r=wXhfJUnl2?yH2i&d~(DY+2K#B~MAB46^EHUuuYIe9dOs1v`KYaL50gfo3iIU0v zONjU7LE<>o+qg?^(qw(49SsJ#@F!};ya;}2n2=d7tDUJkd1wj!R70F5^#QB0nfCkH z;t}7Ztt`4%Lc$@SaD>d&A!HM(Gn4c^^B|gs&-HA^NcUaw$FMd+%F`8aXvC1t31uir zn|`>`sB`)YVDmn}jpISYBpfw}ac9Khn?AIT3{HqML+j_`4JzMD*U;C)H41*xQG2HPrD?nDszsvZb#18GBB58K!7AnD?E4k-s-eCK@x)vyDE_9ZCqSJ z@u<0S@Ro_{5Mm&raVF*!*Y;PjA}tD`WdLKFSU!<>AjSBuz)S}J`S{r91x3*B8)jN6LG6iN z*?^OTR*8m}6Z;F%K4<{~5L*aO9|{E<71_r^G@N^Pde;!cCyZrg5oy!Olds_L!9jsg zgKy*k?4FQbcQqy^C91KcL^H6xuP+$O2g9dZsh?=RUb@jP`1&!BC9+z|koBs`3Jne2 zGYQtV^P;VXWozZVSSDj(Dd-s*W zy|U4Faq5sS5i9lLR7t5wEJBWuBPTv%>?B!QPs9EAC z!6?OT;@5&1IoUXtH$lYQ_vHAt{_5uDX2H>`9q1KElR#3Z(7k+xTY4GprG_?8cjkdj zr-^#dW27tLLDzL?#86z51S8CWx|x^i=?}d)R!B*4AuEqqIzwAolcc*C9A<9UV=b4H z*7Z~29S0UWKM4vUEGHN+*lf#(g!AQoOzgBUAoOcNV$g!&nog0%kx$eDzMnjURAHpT z&l$6Voydw7__$rSLV}+zqFqR0bD(kTSTTr8;1gJW^Dbqn4L(}7};7kHwxF)QqgkUKm0}rlS)134jk*?{}feK%|%<_ z&{Ft(#m~V(HPjm99^vIfek4>5@JwCE1k5%v>3(_mo*rq z%=7v9_)eU$Bq# zA!-{;`FSd76*F~yNc{pY3>*OsFlqb-D{^RdkEy9onbKp~4s(}~=bRZQI|&Xe3Hum_ z^_M(^Dik%?;lVKBWnxRO+7TuZ)}a2g1%CvY_2Imnv z&8jc_ohSaIYDWL>!^~xalfEVSR`HHt=~E-L 
z88O$u47zF(E|j)cs+HgJAsHrEL^wz@1Vbu0_*!u`5N7)67umT1FhRwxG)oWABf z?!v;ts>Ss$TAB)*@8{(B;?my#>}-PR+y1=|p6#B59}vkO)4sA)VTZjWd+OF%e&$_U zh^UD>KL(`1VQ~p2qhpZHMMn=`aWP^%Y}AkT0(5Zo)~(E6;cnU7siUtjtp!Qx?rG5$ zL{UPrnrHST@tcICyZfl(!h?@no}L+4iOvmA(IctOE-vkhH#s;DF0J1n19V|F5Qn_7 z9j2cO+_>QeNKQ85vw@j%$eA>(1et<0md`!%npJ|Jc2snA%SH0w1rPwqey?&w%Q=gJ z(4<1%4XA%nCXxmMT{Rw$%1CYdfKy8j-Vn*ka?RiOBxs)ic4)iXM{C*Wc4Be?w(i&N z?n_9co`Uwrqr0k2MDhtO_uyQ13P@)wF`)KEd4uNKXxAQ#v23aS?3JU6vpWCoz!)!= zmcn_IqNy!th!7i5RA0Z4&SpJL^j;KF&u9gmg+jqlTvTtAt}`pJT3_1LYE-V8p(PUB z;H|E%ZsX)cpaf!$f%B_j?Q_}5`;SD(mGLlj3E#@jiSUw86cv4GJ}R#a=U*{tkRs5Lk9S5}^CGqfb(X`;AkmVR4~2Lw z6mWiE!;v&_Fv=Vn_`i;2a!S>)z<4M^vpk4kC`6@kS{0mn2`YAJhDs1E@hj_HNCJyP zXTH%CL^wn5v_(>vT~5BUuHSl5v$bZA{oLRCBs%Qjdo#E1Ud4fYgrNa;x0;3?!&FJ9 z*GkChCx6h?t{>JE()r!odQJe`ry?l&NQme2QQn84CX?i9eS0Is@q-VnK5Y?Z0E)R8 zw?4o$>}$WL&cFgly647YH!Yu`tZ}kBK0Wf+?lg$kk?(f`#l^Sv2qB>O;5g^BNotXF zLG4Wb1j@xcQ9vlr7{6uXNY!jiF7kX|cA*X;AoFdI0G1sOuQ6?*t&&{JNAdVxv)>=>Y za0@CWjy3K_UzC;2MsH*3mXX#A;WP(@c%$%+b(Hf{R$DVAL$W3=X=bIfIxSivW~+b9 zM(cyh)E^D#t3OF`MN3v>I6UNW{^z-r-a{b#WEQP0yY^8;=!$zBw$&Qj(2_6|EbPR(hjHgy_$TiAfQ-;8LTS0#?u7j$u+qpg1Tr zxd}SR|8|SHL8bt~4En9g0dAL|QJk&RJp^{olVZP7}0kv|m zv*3~}@D>3%5V&bqW2P|f9!fl15OL5EK~N0$ImGY5`lY@IG?(>XLb5m!=zRr^IMMb9 zLSTowf&{2RR-hRe9vgcJOd)WH;JmpU{9v<6F$4?2*#`MAC3t(-uW6$Sj*_e^V|M31 zK6XvrF$o>nK`eP9G=q;0=)E-=7K$M%A=vK(e1VQ{JX%1{$<@ESa!&eaDLCGAImF|CCqr0s@JyWQGqKISnb? 
zTG|u>93c%$3stHK(R&IZ6-~yfAOfF3sRIIzTr?(ND{^y-QG{qol^G5=f&oEnBP1GZ z24ZZsa%F+n|Urjxa-E`tkiYEwq@=Be$n&kE{k)6`x{_oBRFJroZ z<-z&8y7>D_H>GywZih%_f?%OwI*;95+E7hb8XGmnGmLG}_v0N*xeXP@iU}3@&jlT^} zAKWK&*ih(-?gku13I*T=5D{{EdP^PB9%^BI%PjebP+Mi&aK`48OLz-hF-F~-z{8d{VR?Zzf zBls-)LnV(T#^saA3<`;}8-0k$24p_FZ*EFZJo;hCREmuQ3$-FqNZ?Q*IZqcSHvLx< zx_t_iM5L#ow5mM7-tnQ@fN4HrA4@ycU1m#Dm^<2nFr7I#V?i>VS(gC`2cf zap{9ustM`rBqevgW&<9l%|aR*C-Uq;F7 z9Rz&RBdxu65)owLu6CL~ZJ!@=@1K%;cAh4>r+)i$u7f)PzE4wf$Hw{qd<;_Bv>1Q+kf z$7i1tO$kRllDpo)XoQ#`N+4|wD|Fj+1E!jNlQ!ljHJ19jFF@Dv@v@Xt>2Ub+Dx)4M5niOd{R1*zZJ*dw^PYSB1# z;gPI@ODLm>&RSgzC)vAbHVfc&)@%)vC*JQs@au6cI5ng#Vxr32zDbwOcNZQ-BP6Hs zq8!|J0HakTvLSr$hN=ZiR0|b;B3Ruu_^pORh+B(CGs`_peh0V4%YE$2hah_ZuUsEk zNitw?bea4z=J$Ozdu8u`{<@_l&dAuf8%`w9-0wg&XY-Z@sS`Zmv9y+-yFuMyjaGcd-o#X=li!MON5s3DJ zNWsZ@LE}U|N*JOCyVdF7O!LpG2EQbdhJziV3*DnzEMS1zkYN#y+90OSeh2&7aJC6s z!JnO!Ma^fs9Sok30s9{vu}0-PH>TkpnoZqGL~B9xuoL4*F?>>7fp_UFhVG7Wm=z%u zji1C*1qsMFpfN<@66ej3#8fE^*EhU0Yf*QflG+3dNT;T(TZu1&&sTKeEiWh{FGxx@ z7YvZBY0SJqa1Lp!C?g1}$$Li3l-=s?NFxVt;nJl?5`|2>;Dh1_mrc0moP)gZF3=w( zHdynuaP>_A`bQQElmm-9kdYnL>~pUTvWAYfwxWzzpbMv9*N|&k$*DeGUv)xbm+T`) zc$8~{=P!W|L|zZ0#&Dz5^*T%`p@w)jxbuh}L6>Oq_?T7e8Avf8j0^ns^MifdJFIre z4I83LLq+5+J{%EIApT7#_*dQun@3cF{D$QkO%S<@Gfm-BVuDoP0s8JVt8IBzjg zT91IT)?cuO!*S_R)s5n07dl~nZyF3gOdcNIb#DrGi<)R`&dY#mO`^C{_%DlAS#lRl*nVDliO3co%Jdk5d12V`J3*LQlzk zxu<&ScfQ{tGjPvu1Dp5K$KNM(^mSVC$l8zy=Dc0mZ)n&SNswkby}L8>^OF-7a8d3C zp*#`4mA8xAYBW42yIwO>mEjD%br7WnvpB!|I5w{Wwi~?R=KbZq&2$t_2KE-s&2bMJ z?KPTvtHapXKszB-f#o_LQZTp&jsWYzI#cBn+4qlG-XEcJk-EoS>M zx90a!&udaAVUR?H3pCLO5-0LCzXn_$fCbNu|E(n;)9AZ4f+wMkBSGYV8_vTd(Xm+c zx7QF~V6&WbCtigoUAD;*| z-m~EdT?TWOnIDKl3kDuICW(nFX(vz#QYf$X%Q>LsBI_`GKH+~-fv&n?EggtvdqS~3F0&U%-iKD)e$ zTFlV5=xNFhr6S}8rNI-AQ7K{;fJWyNXxcyuHeP3$Mgn+&4})p{+>2Q6c(B6gAM7f` zK#jnwLI#2IVLA?`>{k5m>3mG!!EUG{Hv(OFQ_M32Q5O z9(wUQY};})HwX-v;f`x+{q0=%W`zz;jt{jTvbEg~Vc_=nFmBLQ2}#c<=5-LGffo^_w?1h_oOhxl1apP!m#O;1E^USba2jbkQBCE^E-3 z$p~rp?c_$S+Kilp!E?Yfq26|gV3@8?yR`=gR#;`{3?Fp5UZ_~N--HfvTKpKs-m6~| 
z+ZKGt2%Xi4O$^8T#D)q9e9s6KY@xBYqNRU_sYxn+G^q;Ujv$Kj5lI7@1-d+qiN z)*_G?A9GTGQC|TTqK~s7kzC4@Z)L)fI;-8fYyI@}^j;SiUxbxGYJ9wpBnhDAr+5s0 zg}bR2>c>^ll{;mljo~Ee*D&cO+TdMLM}2(c!^%&FHnD6}hxf*mTkqSvB1V7fowm*+ zGfiR{h#bxYIKYthuc5<8-*QJtwymqcq<)P2wvpYz5mRm?4R@N_uYndbZu=!7Nu?qR z?3CLc06>!WJP%gzjw&=p$Ezr`@z_#eGVcJ!*0)7|-_3-?e-0%nIhorgO=0ck%@$AU zJm3%Rg#P;ImuD9JyL)jstY5COhZ*TDWFAdR?6XhtHgb*ha+|Q$p?dYXICwmekiiDu zhB7P}a3gtTES(xts^p{)Bhk-f65%!CSPMe9ZWb+EScAKq6kn7YJeSeyaX^wZC|xM& zgdWv3u!0|`@~2as*;M$i5a&ezam4bvBNNatPj9$@EmJ##mC=47t5^RA?F4G!om(A6 zPm>CxuJ-8??1LgUfb$aUm*cRS4PA(A<;aORP8EznU zT>7#xu~NKh!~EfQ0YGdCAi7;;rcU!2oId0=;3#mR#|*`RYj=_yE_zL@hX;UT$e;|- z@J_?1qxE9fX^vZYNNfNKPQr%(9r%RZ`i3Dy=IrYn&6AKD z+*6HaTg`{*1vXv@&=YcapX#oiNqQp)z+!84|14~6O-({w!!@|M`LRnV_$#Sz;&3XT z@0q`%@<~Fbbx;V^pD|nBb1FDAv<%0dA&3rmSU{__j_z{MI~AfrCcW_Rw|sO&@**7I z5#8++A{Slh#~vLDRTw=OX*M=>a1GWBR)Rmi=VsZF)HsFj%TmWMc)X9(n`rIPQ)W9}L++vFa0ZAf+f2<_b)FI9a~{@($J(fhe&@VZ?2npsZ_Hz>IqAoz0C63YUku81DsSXUk2S zHtnJrr%%|+=P<*!tiy@l1|cd&`ZEk_*`2QWZ+CGYWoj>;C$#y}%1v zMj0F;MPunbZIb{C2uB3&+52zocHuJW*Uq4w?rogM$H({nnY$uLBKKkbrFV0aq0E){ zQ;1~tKhxs181A7L=yWRj(Y2m7DRTcAu!i?;=DX!mZIx!WW50g=(gyBG_#;qSVqQlw zs!3)}_>^0Gk3BBDBCSWVxgn&PlVxN1khuxkqD3-wHA4^aAH-D@AXn>;A6W`5k&7Dy zNlh!Nm#2`DlY03tNbeN4nhoElQ{GO;5gM?a+R!N?Ik_ZFBz<2r+k&}w5t)L$%~v;L zCibQGTW{!?JkW{YQTGayM1U!{ac8{PKuzy4cw`kZzfMV2Fh}*^ z`6ObNlh?aNU~WIa0vV=qpb5KnR_}OxFT*%)^aJ%ByLnCZl=!UP(SYp<2JDV*Ob0Hq zv-jmsL-YUS>C?LqX38rns%vTauaKLJ@RJI;OfZp7R{-4(raACm#=Y2?a{B~a@_e(i zv)=}hjCTC_H~p1h%En^=m9qD-ERZrsJ=2!6m~0ZP8sLI+d!lpkyJh=No;(>78|&B?2JCBFf^#0net)Y`Q)wQ;zWkNBl$kCAi!oMOA=jx2HJS?oABPh&oW(x z9_8%0bHprh`+Ky%Xs)&bz)vrE4u|gMWWDoYXJk!P*ixt+0y$v^8a9EX^1tBxB7>)C z+8@Rc@q+jxI0an7zbAodK8$^(%52z_@L~zR!g3-*o4ZycrZdn?!|Z3lw*zPp^YVY- zkDP{*O3f5D_l|dgj{v%6D_la8sj%6jghqbUQ`x*=uSH zzDb;^Q1Mnq)ELTt*fMNz`Zzi=LI?uj9mrjTFRu$Ax`s`4uRncSisPXaE!4&PD52YM zw_37EibZkVpS2gK>Uy-Rx1}NxN3aLAb(Nhp3HGtDNVVeGf_`vQuO?UH7)QjXm|UEE z81{ssM8^Wu%btKYY9$HM0zRcQa$uQ=l!q65=;!rQ8RL()FttV4>1({0<;-lV|67-& 
zcfT0w;2+=?BJ*{0gRs9Q5hxe6!1eS4=jH$meu;qId%Sga2**}jUKl876Z(()@FDol z6(Lj&+Eox-H9-+Uc=bHA42y98spFS(v+k?nN<~>r!H7K&cK{w?LkIpo%1vzy8UPvc zV41dFGlSg}VNKzVpy&wX5o>l_r^IXy4F#bGliC@Uv7GUV)1$oYzQtX}OaNJI@*p8$ zSW3DgV&()`ABMHNDQW=Q?t@f7*kG6w!K7OqW1ZN_@$vT12MFp27Ei5!oQ0uDsCQw8fSs3+ zNMRzjAFoC1P_Yd~i3u2y*WyaL4-XW8rBI5up@$Usu`j681@7N~M{EJypc{Naj}Kc)07{(BPh0G%#I3 zd{DK5eZVTmu#WKkQB=-36L1~Dh-9V@Ttovw42teb(`(?2?TF-`+w`GT>L!*g!oWy>-*Y()L#@cf~LfC4-4H)h;v%W7(( zm)^Vc-Ar=wkxR>RsP6f0Ps1qp_ju~w8^x%xYcVl_Xnb+@%m$)YhOL$T=u)*ZH4YRc zT!3ch`qljE#|?zM8tpqR;v6blcyXdL#?8`bk(w23X@WJ>3a+DbtQI!4>qI8|c30P& zuF-%}j^Q+QS&W#vTiC3Pc@FP6bM=N`yk9&gc^3;^bVhvVV5xSNiPkx$xY==T;hqM_@em zj^e0@B>GmVuI(D=?$&sIzoE%Sso~tUoR2TT@QUi}6KOqcWgOUdV97#E-#2gCR6}Po zU8JO?>jS?dvYQI=ssrMG`i*&S--l-Rwv%a@e{jjL#B7dBO{ zbmO(}#yliCaE*$}4bl~(j3r+5@JDFaR8w=6!N%EWlDvD_x{Vu?agNsFLTTv8`1ugr zP-Y988TZyTH;<~`WS?Zi?6CVVWx8Y8Az3Z=NuFy@XrtZi&LL{$KeG>Ds1A`d=4ew` zlZf4DG+*SF>Lm*n_@W6yG|%4|;K*GxEu@4(FCevd(e9aQ8l;UN)-muBy9)Ep;M|HMVQYm#(ZvDgI9OdA6l0W#?gdX3nD?4^_I40?&RnzD^rYfD{+5* zoPAu13s(#7;>(3O|NLtXTCMNz+2nMG4>_v=L|#Fq+qI?q=HG#(vFgQL-1C#ujK=CN z*u}yKxA{Bg=05{R){ntazB1mklN0to?@h$HSpGB2{5}0l4I5PacQPew&kX_4`wn@; zorUiUuGe$(gqRPn=1_m}kk?U||4G%9|4ZTKrJn~`h7XQU?veFmR&hLGuM(|gg&b}m z_CR%y_vvH)fXbRIXG#|?QK1$ynH#@w3#@t#SV$ii_doaphDb2E;5*6E(15`+$@r?M zs;W=@4lv|3tSXSDk|OYSc-cwQCuo4l@EZFl_K4!Su_I|_*|)XM;%b+amEA3qk&+S! 
zYapa844Cwi0p)t!LaxV;uYhe9fe8T3@V{dhURa72)>8z?VR zfQ50M8)}>B?%`33q18os$w}ch5d~&qPxc+WHZ}uXhHB8vUL34#5ryw<5}oVDPf=%JYw1ExjhS(qy6 z?vPt;uNm!CT)HueacLsj&fBJ&Zrg7xwN;n>Zf5V)>t@=iHzJ)!Thhkd0kVqrp=NVp zYFDV*_E!q_UZ@bq(Eu8lHmKkxPfs7czRr#d$Es8D+JPxujZhGm~B1 zw^qsf!HAstaI|j;HK}qVhYxlIDIJkVG)?);k6>KBTy(Qc%b5bWmSKfrOh5bdv zC-x(79H^MUAu4NJ40ri)P>RrxE&{$mnz{rt7S74D@Pc`Oy6M&lGR(!L3>%~rzw{%# z0s`XoG-Ss(oxnisDm?yCy%v?1&%sP=+v(Gf-sYHK05AuBP%i)-UAzz18X|KbG(yWy zxI8XYoT%CX1cn%9;!W71a3OBqDB$Yb2yF}=<{sa-0)jwrL_5MH7Zepvd=q}=IboDJ z_xG}qH1DC|p`%^qM`Q8|YgTFaJ$$yw3xbh=dC_#R^BQ#nfIY5&MH?C!5`W_H>qn;9 zg#XG&dJLNenj-Og1B6eioI*D19Qgq(Qn6H|q&Zrladu<~%waIL2q2jKOW&EKD8>*#sV(vhtJ7qQ{}|5$R@pl5 zE+O_fBQ`s(B!#Qj{$N#{(P(s7P$tGHij7lZdERwoWwZ$Pp0i>dw7}Y^b_=o2V5UEt zc(}4t#B@UF;~2Du@c`Bo`mZ{uyKPkaxO?d09R8S}D8mzgp?#Y&MIP39+%PGMK;5h@ zpPtU#?T&)1kpc3qA@=d$B%bjMAE4i&d*-cNDJ3g=5BuaDdU%P8Ft-c`S-Aek7-4GT z?(f2*sdY|%@_$T+`YxXCslNpNmbwO`e<42~K+9Nw(48F}HK^SP4+ccO6h8e5R_ADF zjJMIK+0efa;GCZtZw9(W%xzNbE%1-MaJ>`_V2FH;QUg$HRb_OVNYFlMngIJ(*Sia_ zU{6;Z$mzEh34W()Dp*Ue?4?QOJ0(v*V&Uug&%Ez(3=`&B<-mcF&21%{uiBLp`hHy8 zl-kJBPW1A&*$I1Ui(28~M861@5rgkRwMeI93xRb%XvdA2ei?mIl@PA3S^j;>-ggqLZCcV~GGmrM8_!IJM77|@3rf*j%)NSZy>T?~IBtb=zUg!WzUUcYcVQ`CtT`9ume&&TW7cU^?ADE+GqJd7{BRj((l9c%{iwCATsABzX*GVb4RxKRUXT1Q(N%FR# zRWQIwjW|SevD-1sq>j!}9%m+4su1a3#^JX@LBVi+^&cv7Pey+K zYq;`)39DdAUt`m$E?!KgO|x8+n&}E3lSTW>VpxmA5$*JM{KhMzPI!-C5*>%NrE9rG znY|aotb1rzns)}P!Oz4ablCEj4V5~D6+69&-lAiiRP zR171>f`h0y37-hd>J?C|`qJ|_b8W#HA|7>QR7U;*`YZ)8xlt%hVtE4r7As<{=U#l z1uX&z3_;5aerQ0sx6xSib}1&v@h6aH18JZLaf#l2OW6-|3+`dy1th68#p>yXLI?qt z!rvG(o0s^rh6;r}>@}a+Bg?2G%UUKF7W{?Y-d;HG03wgdWqZxFqD9R<+PhJ`>3cz~ zaDl4gDQ{Wb4?{sov?XCQ_fqPEQKh;vGaU6nCnfNBp4$(kkHadkJf-DwdSPiUD+SVV|rvU=PH- ztZc2<6OvQX6byBT23yrBt3zL;88F%S&sH`b=gaD99wJo$ zqBbg}E*@@!;G6C0yJ4^l1UpdTFb#}y*zwZh4;?5({tC>9r=vyzP|^l+CMaixjavwo zOz7aMKRc-4gOj=$GkhNqY{v##()U ze+6)oLonGZ>M>KWjfOhV5dK~&lojjOF9azKNCVijKx!Xa3R#VXS9Xpwdb5eKAyMYR zv|kg?gg}9ms}T>cRO4>x8^5>RjA_n@6Ex~ZkW50SurmF@n8>1nK5oGcs)!YLnirj; 
z570*AWAsAlv8TX{Pz@1afP;c^u)Yn)4HpJvB(2@=KJM&RHey`z#&P};salpmW4f6f za%u(D%I_xHO}3z9BT7&d1P9UU#Gndap?HWDiPDi10edr9KjYY(r!DhcS3}fJBFvDZ zpWi$f$|ka2A-O#H=0k3rRG^Z9sDzEiTrXw zmZb8KIX>v<4j&(7Hg2*Viw+a+iK~ssq=6^^#=%TruoO70WD-0CtUkwG*+;iuTM1G= zx(l$t6^MB&Ncmq;ykK0_^wiVK=1mJP8(@9fbN=+<@cQxi=G-;A+&|j8e++%V%-)5i zZV9py(S-)UtQSi?S-i=P8h^+W>W;No)}go*?_y31XZ7IYHmk+$m&GM9qO*o%BqRb- zy_5K!a=$L>)pH4+2$^X$CKDps-VT7NCUGaY=!<1Z8=If4Yz%e1eJ8yL);E?lAu00_+mb%)#xsqd8y^rqG$KMq}YQ^o6$F zaa+&%<3#2<%1^<4aSg=IE?bDm;uDWXv*<^t@`<#r9$NE;4S69Vj9^+)Yw!03hgB%gYNd?*|BMX*;fYQmzT(6&9=oV{*6EtP?*#Y}A9M~0=TYF)3W zH9R+=m(OL%?0*)sntL8hyVsw7he1?V zQkn|IM!}$waBwtW3Xf={O1tSGf-^eK|GiapnKvc+Fx^ZqS^(bdAiG%b0NfN&h2`$eX`B$#Bg=hvwYW!KV8*>Xh34mYDvB z&-%;Wvzo*a2&n;TAAvqeT`qK{;M50wgrtiREd-{`Hn=6sa~((?;_A+f_J8E( z&=+AlsY^XKLCNo_{(WO3Db;J;)>Mz1)4jbCjshv-ZiP?06RqIDgE-8s)~#D7kJ=2q zwkR?UCw41QpyHi}noJ#^JCGBAljeyN4=k7R^N)7weeipXS|4AAJZNE6*Ohe54+<)? zb6hd@Yi*d{5ZAL)SiS#l(U&j#oi_zBrKYF8Y0Xhx|9fF%Fx8zrn`T;5qhB;tM?Uo4 z@NG)cl%|lMHOkXF<)~MY)iT#AE+}bZBSX*0JId1fa)`Apv$AHYQjz7j*E+p|b~HO6 zVl@lV>A7M9Yp`YJq74$V3bJvVN0BBFV(w8D~OVco#|`)H_$3KhLp3PHPb|SOGvRUe3n?*8GqB;rZf2 zfWWa{$i-i~NOm?kUZR_*sO+U8Nd_^IG?K6~f4EzBeEa6~M)8lXqtiofAw_T3F45>p z?6lRDO?fqkG~*bKKk0hn@YP;(df-b4NkFhFG?_dDlYYxdY5hrsdNz@hMn>N{gfg-8 z$}*A(=X~?n+sgp~v;QQ(XOD8rB;k7x{r$a1Cc%1_5x4MvTx0^1drHSs=B19A3GJEO ztfKNeL=AIB;+X=;Wb1raAR`Z8LBM;h)b9vVd2t$$8OW3Td=3f}=0$tlm*KMF=2rFk zX5BESH_dF8&Ru1C3WoOg?y>h;x}QCZ5|v207JD%?t5j`KDBDsMCZn~?m?gEC!C|CE z>{1-SkMju|jRQOBt;+F&KL-We`{m^HWeN-1RsO7rT9aaO5gErZqRUkMv#6Eo-J>@W zdQYZ~lv>6`YQ;_DOBa;NSuu63E52FZZc;DKqQ@Lo#pW+p*@Gg7H0r>_C=q&>(|*}m zgV=_rN(8i|vcbmpCsn`=mqNZ;kbH!R;1)4TEF3C6>m_ zoN`IdM6U|=+tn&F+316yn8yH;NA%E&=t{gXD6`hhz~EYF2oGiE)2MGC2VzWO$c-u~ zYX9ONWOW5iXzNfPFeFy3dV$#*W?%djnSaRp$1#AhiGPa;J|^}H`1$!nL_z`ND>eyJ zzz4l{`}P$Wi<1NkGAP2OjAg4?Y33zlTEQyl0IGOrBHByZ;szTdn*U32^AiK0kYO)R z&luGI%@PKMV`6r+J?!2(RorsF)drZGVURa7{-ZZmJqGHfu%G_={d)j@ba6>Z0zf-o ztE3L;-wQM#XSA-2ZfCQEUfe9vJD!}*zJH~~_Wp>7z}zJwZZB;`QpZ-$3SuIv{eKTs 
zRuB~T|Cn$Qi+ofLXhHZ0YXX=Hzz6%n?^7^QX}D)v()Glk3o@N zgD;EWNXKmM73c|2B5#M*!0+i!*ZigMmnuS441F}==n$VaCg=cy9t~M7_GC)Ww}#urWlO5?K~_W>Jpz~D z!HfyxxmLX!8%OhH%}EPzFgDKZgPCwMS=*I@6A-1k85=lx$wXL>{S@nD9;yRtFxNY3 zEhIjK=MITaqQRQiMMb^@>KJo>jVYAluD!q6x4JNjS7-mCv6GFfG5>U`i9fG(7Ry~} z_)1_PZ->9Gh3s@vxNZ}H3sU4Q@nf>S#KTfp@Q z(cj?s(8ZC;*Ud_dku3d#MWS zD6OWcqOkoLUOGwk(1r5e5J_V2R8AS*9+Md@H(u!oO=tFS77sipG{~S4!zjdg2?Oh| z0fgAl7Qs4n7#E=<*hRk5Na}@8I+;x&G1n3Oq*Lz&%*Qbwy#0p=*xwHyd7KbQC?O4S zNtA#3bQ2_aLi~=naU760)oMW%XoMjqsr9{DDT_D6H%}d!aH$k#ylL~8Hd-{ z2YZhX+S0T;3tFZC@iwMKM=%46`>Ty;4@M9uM@W#U><&3Q>w-PHP&NPh)vIbUmboid z-XZNN6Ahy;0HM2kwA`C`extJYgK2-DkMuT1Y|7NnWD}-E->V$&g?0LVq8RMtc^|Zh z3yRFv_mDg_q3`4^@KCOIe03u|_!?c-Jd0jm5nVMjuMOuRS`r9sq-vKWLwE&ggbs3j z&W2oPD*iY6zw}sK8n;W!(ksW#vQx~V5?fs8{p~n==c`k^-*5ODr50n;Db4SY3c>vc z4s3)@r*nN@-1!J*K5di#i;tnL$0tp!&Q5+ie(Kevc1cnH-u%H)#ehkDp(WSOw?wu7 z&*=o^eQ5kQ6CtR?7e-dY2qC+!cr)W=fAw#DR>050FPmIwE@*`;UIzxw-|4yb2Ie7O zOq7QM%Ky=J-P|Z}_okzb)9lxY^k@I#KS72g!KL>!9c)CE7bJ9E;t#td<4n0N5}Po( zL1Mf(R=0HH#WB>Q3lhZmj-2uo9@}-NF~x9@>Sa0pxvk6a?2awpM#P8CWQ!#Cyu+_; zg0Bvhar_;*z}#Y{WnAufLyO4w?b}a@=Zwx0dYiNDMbzW3N>9Y4k7N$^>?~3B^(t-@ zpzgIWby#8=s~qQjxiMNcLET%`S8?6f&2CbMRwi@OQ(xnn8XiA!YL&11XisG3{O|MH z9-r<%e$V1z#hwd)hd-}c1^J%4XcsoXb>??Mg zp@N#Gnqi?r1M_=Z^K+FSa2~Xml)QhehVo_Y9-sZsi!vvEuHCD2*T}g_&?oLe(Vr*$ z`zLgrxaXHQ+u%&_=4WM%DaKr_t`{( z`r!V;r83^ECHf8`f|> zHp|j@dx`(aWACjw8(jR+x>YeQ86F%wa5meV!`Wp(CmnL*ru2a83JOm)AM`iu`Ii*~ zKZZxUxcTF|xn}mv&*0AYOX8K@s2C6%sZ<5h_F)^amo|5}F45~)_qx;PQv}mz_enFe z_3UnmHF8!(u~9NZlam9liffIOTWP5(UdlI{^IrMAy>){1`AjRXU|@lweOgIs<72s6 zc-7>jue}Vt)~)jJnHKmzL~^W^9s3bt&14#mk4SpbC$HptQc_e8P>g2J*C2m6LZ{w3 zN8_rJdBMWN$BvCFt)RqmFTQhFFIbCVc~`5oy0db~86?dI_kDhWI9|EV?No8PcIZg0 z{^ET5>Y_VZ^KuK%^1YeA)CW(*(t2y^YeqR1p2MqIpT;}*7fMEIr7r##4da(1G5dM0 zl~d4$nMjF~0Z2oo?Y`STxw3ChJ3F(Z#n!mkt7FH)PX&JIceUo&#vf&r}amH+nTEK)Immrkm4*kHiMf7dD1lndH=pHUg^6|4Gc|R$Il=0 zy7M*NEI3(G?TgY1x#=rPe{Mc1v{CCVZR64r&13en6&EkYa#&Clk(w&+cIskmcsTD^ z+t|@o_8t4`0}Sifq=4BSj~*3qTwQjlMs?%jtK|o}TVG~qL$S=srqbQ{QNo)SJbjh3 
zM+St;_^AQ^Ih2m1drpaFM>#30h3DWMm^Wr+3Y1lSSmKRJ;dRVL9{sTJhC?oF>bWz^ z1xg}+@JxMFQ@{IW(CSi+-|eR?NE~ zjq*bg#fSD~>2}PYb6t1AuJy&+&ZiiPuG+?@pyqQag_G+ulf{y+uNg6#`ME}R;Jf?T z+dZmoJ?l*Uyz@GL&fymuhZ_#rRwUU z19h|8E%h%?g>7Kv(nXB?S(Gk3i6OiEseQvI((PkwrR zU+(3RoWgmmno6U~Pmi`YIH{PDG;>e?a<4QXw=!(T;s`h@jgB@^D0XTTlhM(l%z34s zWaI)05;)%Si*r7fJLSJl-oX6F=2L5SBo%$`ZQ@vIpv&DC=fU-+p#k}ZcPehH2e8h4 zsOCxBb2s(>arfTgRQLZM_$j5*kd&lQ8bXpJrBIQKq9kM%x2#B_WE~}&D6}P6*_|>< zMn*Kr7DbW@nVH8KzsIX_f7bo|{rkJF-*w%e&vlQ|Iq&g&J)h6V%3nq~lCb~CxWDk- zu9u6_jjWsV#yY|tS*=o3?oxom#KFKoSMx7>-xoSg>7=q`ypThE?}I}2 z=lKd@@w%yX_gD{jf$%+Y$M?`Rh*ps_I*LH zU-07U+A3}|geO;sYd8tgxBr+!SKSlWUbEV@Xt10q%-Xk);@#~tqaC#D^{RVHSuaEf z%hC>8GumgQGct;plDDrZEIktx^f4mx!i`e@Slj{1hVMSyM_Q7&H5${-IW=vdcqcwS z+;b(kvU?F9CuQxeEj+ngvn6$kwyj=W;<3o9V)5 zKC*$=vBm~|v!h>(vKjGG3zFv)~5-0(msgaw}d!@l^9j_xtmM8ALZcZDLOvct_Bgc;jIFsJNoO^TS<-VPPQrIye zgza8h8d)*E&iODS!RyMTQoB-wWEe4}xJYIx)Qn)+TA^8ccj|Ki&Cq(d+1K*z63`oY#g$5?u;V&k{`e1K6rq zAW#%&3xc216O?~nB!#ELrZ$(5d7f<;3Th@-|85nP<*Qfkb_L1VbbMlBRwgWsh=(2) zOgocxMF86X>#J>cKvp?OkbWed7encJ%%bfodMyrfB!xQ8%yHMhn-(_P*u}-)JCrmo}L)uyn1u< zToCZVbaczyCB391j=3WKOi%Y=_G##iW3X&n)fql;ULo?Sdq zUhXf(6t!e;$YBd@b(#h~6+}auKy3&IITuDOD>iOi04vzb=0|?MENQ9V6l`da?UT%- z&70k%%HG=H?hs2aMKoK)9tCKPZMLCR0U^qOBaN*&klFmo#Z-$p8Q}i= z5QDFfgb{~eDL2EHR&!Pp=Pwf zmA*7qAh_nRO2$r}2Fe1kYh}?Q+TaIMPfrC9cRs|9Q%WniY4l*hnE0$BvBAJl?t^rY zL{{Xm^>lTYW5o!dcL#L#amcuD@4UXlhr0+k(v8 z#&-@#*#pF4;L3zSLKOHcD<3oP--uk#{@nv#>?>h(X(!(z*PvMb3C0ugy5eGTa{6jT zz(QaAwGpATt9~V*g0kTJ$vzdz!(r!S6sSPTTX2o+oVV$G|M280kpbij;fE5V2v?L8 z8z?2~>graBnXD_6Yx9Wk;}%|@e=#&P0lcm1&u&?AxcG!)y6u*>2Ae-M26h%87Q2`9 z0FViX`RG9N&$pzEzb7U!aSeW0B;crcyhgPkj$CNe8PkTS{qh2^dy}vmPzy?ritX@^ z{C(YN%oYO=mGTPzt@O;e$vrJhIq*k8*+8-%Nu5Q|wund&xVym3W}SU~mhtNa*()9` zAqfnPXX(<}R#so6bl5BbK!Ma9`Ry6FK?WS`%DT{C9_{)4WxpUHsaKRQUvNJF^X#Qnv_JW zD3ajn+a+e*rn1fwn=()bsuaeI{fC026Q~Ytg`uc2AoFweW2<(DJR2bb0!2FIVGI4} zNyy$tg#dWg7?i0XHu;s%589Z0XU4183~j_qt6xlyDkRr+={7}mdv;~{41QtZFW;1C 
zOy%S3Lu>JB0+va)yzLS3^MpNoqd`3wi2f&-?vw;Al5o~L&ejbYm0`mb;%AgNULM3q zf;&xv2YYlcgg63o$_;+U(l4>FOz^yz=(1Gj6+kFrjj&%|Kk3xcAcIM@KoTTfk}jA| zwHKDKQAe=sIutrY(N7gAf!NQb`<_m+^Ocqynn9klgM$OHIR*WB0OIny;OHQ4&D_76 zXuFK_oM&BKZ&G4(qM$xD|4PkikAP6KgB4*0?5qM4jrCWTh8}7gcd)i59%LKP3Xqt* z%N@3_ng&Y^m zL__xV)O+{d9FNp`Q>MfQsU3djj)1wv^9570G(Pch}1t_VC#6S}~BNwl-;4WCO0rkUr zKj~1$R<&MX&yiD(jyu3S$!`7%ga=3A73XIWk*I;OtU>65KsPyj_w0U#)YsLvnOJ4}4 zj{r}=_lflAdV<;o292*&;rP_2OkrpGZMqZi3PLKvwI?z$5K5+lf=n(wf>=0kCCH^G zK4W5|eIg0eka{VkQJlyh84XqR7J+2|F&p-u@M0zQzct*tmBzexrn z(h8y(<)c-2N$w)76Vf5nj>V2}qS9hI_8n(FyQpp6D`m;vT;4SPO^ppj?OX^d8;ty7 znabTqrTGv3;R3KE5+7D@|7w}{*2q781`3AQs#RqlKQG21bcfLQK+~}%J8oFs%JO`G zu)zj+0I@W|i$-$c)7AL``?Pg);oVcvvRFb1{$XhLRN?AD1Qn1Q=$e>F#!u4e3n4lu5EBbyN7ydi2(mZ;qY2+( zFhZ(Pl-9y9hNuxhG$YPA>RML?%+Q|#H`l!QBu;)CkLaZ=mb7jWYPBH*T+nKS>m8|3N4w8&1o+ z|2~+~9{#1GA|AyBgBC!DJDr`Kb7QugegBHOFhp2M2v5AhV<*d`ngkqaPti=%Am50{ zDb2c`O1)+|$SVHXTnNc-rtCj-$QQ>i52*cFYV*U~C-xenZ6Tcqo{iJpHK)skEc2;9 z^9*TwqUJiX4+KR=ziC@0G&Xeo5dd|@f>>gHb9s`M+RAWlQItSZam`+-p? 
zQU&&DtE*SL-Lg63{vb>+6rAFGP4sKtRBbLx-Y1?L zvvnk(1(qArTkDLmjE9*_fx6Y^a zHsUhE;{>Yzb#Rrwh-3rUB~mCj^>?VinwLVE&l4~cQtDsV44dO^CmG`>J-}NA-*gu` zBNSKVFkAJ5cn)Z~>3>Z=+fh?i zJu@>h3Wf{_E!161=ztL68<+8Ud~bS;!G!rpCZc>*RXItfIfBZG=Qp%jP^)KY9VU+Z zxDC!iaqpr!%;Kt-qsCJwUYXqiOC&eAZq&x|9{U-+qK>I~d{U?R&uYc_trVz`d*Bsz z+*2L+{hG(dT6mn%ORG3Nz6Cp&3&U_E0q7k$AdgCmqh)n|CCp7=gp!h(DJrd#)7A=> zlnn?8hW+rx*nd-e$)DtGMaA1#Hrzd95~Z;lFe3yPzGGk?@7{Pl1gXiZpY;Hq?JpR+ zJ>UjG41`u`*_C+o#NaMbGum7Ma|=#D($}iDMPk ze!QxxicIsk?qoN^hn1)+UadjaWdG98qPRIDN=NdkucZ~s=+JQdJOubH(iGAo`2MTg z1l?*EoaEEe_Nq0?T`~pH?7lIYb_8*Q2x^ml5Ya3UE~k%`<(Q$>!#sqS{OOxF`;yZ1 zpv0tPQ9VwtDvBLX_;Tjt$zu4D5n5CJaE&JE4=IEbkHZ0M2l=zjwT+4xzrKBVlB>ri zT|P*GZkhbn+8PXmHNc@gvRc%LxNEMz+GAXB#Fkn1QZAd74nh3DYndu*lLOUy`k`!~ z>%@Z@?63-`$7t?j4wR?3zXMO&m|8mDb?6oI|J1pVTitBj+yk=nLi}mI^$hbQkbK{RjuFKs3h_5-~_uXA{n__oR}6QB2U= zhW}_oxwJ2a-#|dP&d@(J#*db@{$FiwP2f(mNy<-O8P|@vpF2C-!GLedvf%~Q!lj{h zj|Kys^+p8G)3D$nupU&3uD#ifSq(I}TVXSJA5fjHo*q~3v1@T&=AAcns1%Y40m7y) z%tn2Boa@KWI66XP9W&7ACQ=0}W=uhsKV7X+@(UOVOP;*iG@6j}JC`}6%R2f!k#`f< zN3MDE)?k*rf%p<8+Om?*i)CeHH^DNx`@<7CV%3M366ZA{#_$KkKV3$5xLZ|K$HgVP za+Lr$xQo~9JB~7n^Wbw&k0$I9HXVD-4=bd*7!G@mi^6;o2j?vBhO%qs6VE;^epQ59 zOSG!*T3RB&2m!Yu;Kq#`7o1TT!5?0U*73LRUM;pGt8=xLl{tvTBN#4f$+|jd7ZD9@ zWETywZ4H@ohcxVqa5}yPyNsxzQ88@5E0J#@Hg`C6FTeN;{27NFdSKLvG#0D`;2XGC zMFp(lB;tR`QwqPsFuFh{C@j{_xf-gpbGnYk@AL z#{EV;m}J{PL~yqUp;yaAL`uGQCc#SsA2YgAN|u#zS63W;8rB<_-OXuR@=~a;@e8Lx z`i*6?ql-)L=n(8#@Jk$#V&Wk6MB>+qIpNhS6w_je@7~7k;m*gbW@6eJclQZB=^-(G z#=pb1A~r9txu*Tvx?j8Z9A|#c?6AxyN=55T3KJCiMyR?9-`c3y`|mt95WgF<6kHzP zMboY&Fkngz)!{SqP9q%zUtvw$nuYEknN}{RRPsVb!f4}9G?C$ec3?jW7GfTp-6tSg zB2;xZm|yi;-b~p>KY?yVRag^}K2rt=ZEGbYN89Ttm-PSU61p0I0wNC#k0>Q@M;;8%3gq@F3p>IKU^?h8;RK-G#efdM!k>-OA<5 zDcaiyFC-`*JqDt_9|qq$DJdzMIqb0_wjfZ9GQbAjP)jN&=n%_d&6nmT7H}*1`1sj7 zpYEM(R_$T<^ngC=2Wu1EFsY`~)AykMabYrR%Lk zP7)%CNW%>Euk)u@I0T_ffXfSUUV-YHld|8?kn8&1HO5P7n&*L8C(BRWGu+=MjKU+| zPlXFxN!TR55}Nj1$+{)j`k;?fK~Hf%gU0IW9#6nX@E=gY#`G5D|LNizC$!V*2GcP{ 
z1@1rLnd=|WYi5p3G8<(0jwpT8o%{FCzZD%#{PgP@8o0X3>H4HiC>89YpUWcf{!sWLw`cEF<7};X`F~2p>znX~13@oY^by zr&uI(VF)@x9Gvr@*xc!4tmE=|(nrz61lf!sp;dTlp2BMe@R%QT`k<%3#l^+`pY(R# zE?={T z6Mk!tKhveluPt=bHYBbvs6alUp{=u}hnwNlPX-8Bt4;%40QzX7``IQCW{+A~`6F8b zS$d{l{R?PAwY9Z0Je_a={)GOS*e8Ya;cylqW*Ei%@GB+}UvXHrpfTahjGv#%hT{N1 zGjP@B;34OHWmjdu0Mhon5_?hvR)NG$9#Aj|ce$aJf^RxJ_euN-2#xJnqfjVt-kQ&Y zm=p>{WtvEh``uU>Jn4f7eCx@l0^uNzFHf0Lx+pkhpbwMzhM+VnR_rurG+;ZsFm#q} zJ3U9`0?5tnV6t=a#Aak@skQwxh7sMXwxo>?knUW-9IxsHEk*HVqMKP`@TkHDgY1yd zNnwq&peAG_vyS!Z`SYhZ`@xnBhA%4@fq(3x@7qi^vJ@S_$c~$QkF^{O!|XPEm~YW5 zk>K^kI;OH*G>XQWK4&bEWiJF zRnv)3KH?iAMV*)(sbT5=>9R)m*bL>oJZ9)K(anky;^G7kgM^Z3tRdpq0fH>FvvZL{ zpt)7iW%S4N4J_s!4G$CbmxCtXCzF$VrA8*ztv>ZX|0=`N4*9?yFv8)%wyF^$+N#*o z5DAju3D$(MBVIsPR*Q>^$dKNUefu~JawGTf)u4kCS>TfXQ6xeu)2y0D9m@jmF+5P(F0|?t4n7m!WpOB0qeEl z+v#RaEv-a&xQxMy1%9kL7+86Db^}Yn)g1ov4#3&mB^y=xbB0Io$~`Fs!txEzQ^VOd z$ef8Wb^8e9n$O?8J8UN?)?u{w(4j?B?qHFZKK2Z=+IU$x8>ojh0MwEmoGlT?>#-YK zQ_$JfV`jlzEF`3b%{Q-U(ZrXVyC$$SaYiLCprX6u?0MgcFZLXqS=Mlpl6qyW#>>If zl^qXddM&yK>-lLc*#;$gR%Q%sJbqbQJh>Jyh6_M<>N~bqPzmeFe+rh;IKJp)IR-Ef9VM#d zAr`Dde5>yL8AvaiN9_9b6tM(DGkeEhS5~fpR!T$!s&mOVF-kE30U{zIM1*;{yr^mL zMP1cmi#F=(vmVmEkySH1@we419YuDiXCZ&ES@+;bdTuK$_avt7fQI+ZmoLZEN3bSI zcFxEm(l-#JnSGZOfLAn5qTE+0Z={0|fl|>C;sBz`W?p*|?Aj6hJ^g;KHmj%)ZM61$U}oy*-11 z3PRRXg6nISkz*BQhBC&EyL{==lpT{A!8optyo?lcJb4lhmCDf6S9V65^dg$ZAKs5} z)fvE+-(|~(aP$*#85#3HLMU;E@$bFN%*G|kCbDN!X-rL95A%+;mKGxepu)>BrC(^# zeO+AyOquTr2_zlD86OHQWdT(u=HmnCt>?gL7WdGAX1?41)6{t3qB&jNgC}e~ya!1! z;a+a4WHG3|wNO19;5w-wz)48Z9_2@fFvaz2^ciA^MNB17H}^O@XMG+I&DQ?ceju{m zOXSV}0A`k>?1f9=moPXbk;*!KXaefqo#>h2vAAX{pJ$n(aJ&a~Lt<)6n}LVty0Wht3?h}vq7xg4j_URwU0Dg+qWC{uj_}wAg7PCp4TM* zx2rF5<4vGZq~!3w6e%EYNaLqcjp8zR$b3m7dG&)k|;j>PgRqzGZ~Pga#ou-aaWIp&gq8 zED5{n!md{4oRB!!Qej-7ZxJ#gb>tD1GDuestXfu)OBAW#J`|&C7|bnV?sKiG8|0v9 zD~v4k8c8)fiiVNsvl?#knGb4qv2%JQ5FSzkx0U%zmIM<2WL(MJkeVU@tZZI(=(UT( z)X8y5trGP~3VL!6bU`IB14QqKI!p0JOz;Ec2!UWv#Q;I2XU;B@XiFYeB;~Yh*vv}! 
z&Ng9An0F68ThJz4re;r7HmvZE>`(GjQz*;>3uJtt{Tk{v1OuyV7F1sBQx2h3+-%6L z!)|Z;kJmDWR%J%0DmKv*8UGIJVKMO$OIdU@=145KjGL!OA-bwp*jSS!iK|zye(t?# zSP1VixM|Vg_O^ZO{VCVJ$6q&q$U>p+#1S@NG7rFghxpU=56q6b^&8Wm4rz*1v}*EJdM?S zMf|kM{B5U%w>TED@Msb}DbKQHb0}mBL28f3?#p6xdd4_$uUJHhYB&{K%#Xa3szs8H%-#tP=}{&g$i)YXl&;a4&*szU>JyBsLlfp}S(VGoWCiA;`Ci=+E?pka zQ@Duq&_pR@Y<#PA5uC~$VGRJx?`~e605NLOXro!ofW-nyNWQi0%sfau5*j)$nv>vN zESZ-f3JK6MaDb`soB0SK{^L0ZU!9Oen~sza3;3p#Q0J62R~RrzvBUNo?wG!5Y3q#P zFb<3w9c^Uz7?Q&XdW$7^5MhTv?weUKdZS@<#vFbMUTI=XNTVIFI(7_)3CT4j9~WuY z{_6#oAvBg4ED$0M;#2zzCo>%F0{#P<`boDfqq$ZYj6Q7_1cs?0;3t zOcf?P)(rFBxczH4z{&|8$-5GCVzaUq0_d#~I|Z%^@YOQ(fcq35$A!hRt_u9V{jeIu zYk#4rL3jvzP)uvx9zNHvCp5R!RauEmW=3{Ih{Z^i7qYMY7&i~fptD30yv$?RI)yO; zlO8gsJ(yqx43x~4B7vbK+ZL@j@ekUL3VAG9$s%Nyp=fRs>|kMS!hehL2?y*Zq{yPo zEe;h@xPjq1d(F^0gnNPD6oS$4#hW*wfNSDqUesV&ZUe(1w_GVbFU*ZF2@Sy+`=YY) zBAWl?ygWm-G2oIEd1^YZK+rywReD}MD!0-R{$fRvHDCg#VJl1r0!1TWA43SZx#q=UddzcrtZTU+0m zKASHb7a8$ALGa$eS#Otaaoj`G(pp3d=leQE+Mp7eQ$w2hbnGsP3(UUT+pl}kyF^!* zq*0BQbSsJRp1WR(e#9C#k+T*Xj`e%F8m>fkEWXrEzuv6)WlNzzDk7MJMldRreTs1M zh#(1$3x$Q-+C9p%w8|!6j$&O<Y zP~&A7KI4a;*otT!yoil5_26h5UQM!G6uFCtfLhq61O!-PXf26vG4H_hE?4n2n9P^* zh1Xub9sJNVCVVQZsOc;I3N~Pr!`eFn2+MAgTLssxp@9KnbzP0y6QI^S4ic)^zaz6e zYlM5W0IP;%qcoNRB|Ttf)|Sp*4W`<5l!W_Eoi80E9!lhoG#DW^t`uxYU(wsHB{l8a zHwgK%(vy>S;-P_O#{1+Kdcx??c}V&yUe3Q7dJXf~lGtLMWD57wL12}MUPRG7bf_Jd zsSJ;WrP?x4VUf!={Mq_qF4tkPaqhZ2heAHU+Dwt_EQ$lleoZ;%0rtrT|T zvv4{{PVc+;hYL_vR%Rf->|ttV<{m7hkk%832Lb?`{y2VV;hvDDWr; zkaRw?h$VpIxeP{+c3qA_cqB(k;9jy$kW=O%vCP7{>&m>iS1KwhBwBh0R&Ma=wC-~J z2^a{i-6-PqW&UrtP`X@b`xnndfxrLp+VuhT*?-oP*fh#})`WP$J8^YZuZbUMx3|KP+ zh0T6}9z9;0ugvqN^(E@1(qJ@-ymlA&cf_r?bo*E@mTc0HzT-*3G0KIbLaZ%BwqlkMV2+# zRQ_SN4#fOuXyFGdL0qgT5@-z-b{c zv0X#jrDZV$0BbW~0)d(evSNskf&hy1^XJcZfPzcj^Pve4D567pAfQAc2ankc9UKCQXc(6{z22Ar&2(@IOq}7v}=(i!jan&z@xp$66%e^J!`QNeXJx3}T2&I%arJ z^=R|=0vKn0OH92YzUh|3#8}_Gq;x6C@&P{ijyKlW1AN3dIR6-zFF|h%cN^C^`50$fX zG{PUA0wSO)06>-R1fEyq)X4?tfUqa4Pj4uz8=XP{N_6yHaq&*I2*aWbb6bWE|5D*E 
z{6)X294#Z(r3Rf`*Sdo@)j3*BwGfTqvbo{(F=m5@;C1`9rcp4f55qg<4saip3pksU zDJUp7l3`^# zsb0Nkgm>lU!sd%@|3*6<&S*K*LC7xHHsKcd?TX!jgO&0T-Z*90=bhN(buc^h#Y>IJ zqdV<;I0G817RWE4T4jCkTU{>o`|fQ?H=FYR24(Xc{r?oo7Cw)?DgZNieW`1Nz_Tx% zn)+u%)z{6cKh!}*j{_o135hXcG0r0Ma4E&{dy_Py?@Up|LU;X~usV96bQU{MTAzLT z3ZC3|SfBrV-Q~4=;-BFdyia9&A*_Q3Y@%zjc6{h`!36lyrVo~`y`Ge^V`qG)(6Id5HYG)Si}wdMM<8R`%G#z8;f5@up99V zwBrmKhdYPwtKRhVbV9dJgB=$k;CM`IyeOm)1S6Jc{^4wH3R(YxD!aqfR0wUP57>_I zJl3;P@Qg2G;|Ijy4Z(_oI3tkNOe5rIc7p#gKqWYyE!^5Is#hpBGQ%4SZ*!14VS6j{ zNJ~GI3V5DqTd3)xLtr0a&|f9S$~Z2J^UlJW9Z^!plJ%_{wA1)QgNUvJi;^rvF$oZy z7xjG;JXh$LIxf7B0`zAMrdJ(O*wET0tk%5pZLi9sz}HAR~Z18ZRY1Q2Nh^F(;?WLMe>j=stnS|j zefWRD-cQ)CXIYr2CFqbm2!wA3T)f!uWL_BcckAdrU=WyD(o~wBlEMXouE?>0}urOW$jx7zSz=&U$D6B4-9Hx9SY+n#N3v3iDEu~c=A z=G5aOJ${@j^YNb*n2fxvw7!pzoyAmdD@x|iQ#?7!S4EiIHTAC9bKceQ#=1`yPqIh+qicjD!E-R7^7SNZ!W+wRYBI7zly%5OIsgSdhR}aLACyTd0xy(X6em)to}1Y1^+#&2YW= zHwX0F(_4HB`QGPV*|$Hb;T;lEm_ms^pxj!^r~RhSx7s~g`dHBdi&3!+r*r3)_vMr{ zDd>HZQ+C^MF*bIl;@Jh-acz_(;0Z0DH3S=!b_% z;X=;e`8{&&m64I!(U|M7VwK$D(|vur@?JL!w4TU67kKE^YUitKC)*d;ci3qxFVkB-+4!^z*G#->u$X7$+;N6JQ9`T1q$H=O2bx3_yN`FT`fIj8~ewdO5d zYG+{d>WHPJPqyq4`<0&OwOSNC=fgPP`jIP-)vjFwZ$g&b*I8R8!!rL!ANeMWM*zH} zhnFrnLjTSo=TK+(HE`>x{SS^W(HiaI*mp|i_FlF5HTb_BF>Nfq#m6dDSkEu*|BxT( z?K!#XDmTURYvr?qYy7vq*Usqn6r_-Q6k+zjX|#u_3r(qdk} zP=T68h;B-+|_U~D~*$Zu0NiA<1tDXCid#?aznsH#J=)PT5vC8e6+_K{7!I0Pq zfIpAP4j+`#r}XCQwY`3)4kl^5&|rRd~wP7VXHMA$GQ)BmKR)r?=4CB2 zVX^Pnn!bgLKA6E4zVFAE51BF9vbQ~YWp7QsIkBJpYFhkifESH&Cra=g$5K*OJ==e3 zd_&*D#T=BEawk;P9M%^yONA*EPphi(30&m4i`U;lhd6w;Z)-H$uTfAT2$EiKi2;6z zjDJ>d+Ol=0%a6ncHw6#R>aXvtPWH(aU3#xr{(fTA$T%SAynS2YH+IQv@;acDurgh*#AEuV(R59-ZpC zti@^&E_=%4orO=ZmUGTq!##bU&bPD2XS}=|aT3z#^sJ*8r1g{MXXqS-XukWn*}8(q zc>f=edaw%iIrFY)ktY$x3{3PFNlw-!H7uj}$ABH;R&&F@u>=hrvJt8G4v}vqw zmgoxne{ss2&o}(KrON9DS6=~@^IvacO%&g>9mgy^oK!*$=eh)QDS2j0&AzsB(~mow zt@f$KRcl0K-yD!`Eae#UV$fdO!t=if^VzZy89)`n%x{ri_!=gAGdgq z8Pbx0s#8j!`?S3K< zf7B2$wJ1Kc$ZgT;;Kgx6%rTc-%QdT<^wN9zc~Xn`4^~`_Wxyq1>hf6TZ#nk&6)Pu3 
z=SwW*P^$Ercw=R6F?U2^NuNjC<2{>O#l+9I{HSV^di|7J-PVCKJIk6mw)7FTUgcF0 zYr5r;4pl{Z()c*jHx8`^fk^NEii8+AA8nqI7BvDoXa#vP{#ZSj5xPLTc{ zev9LbYx0UJJ?z=a9S6I|ZS7B74-IXYHIv3TBO$&jTZz&%(ldkdNdNGJTH^0}C?r|q zz+Fv|HEW3L)bc6j<+65E+S?9cR;6UyzK(~E|khnLAOoVRl)|M1l!>vUy- zxehT(VO~=ClqX)yEC=R2kA8*iQP*ZBNn3o}WV`UTVeY&rjI_zeby7(UT<6sT6;! zzSsOj|LcX_`VHEM@9v9?oJl=H-Fl$bz%WH7#(#x*M@8fyguHrwmJMmzQz`s|gB3IB!v}p_JevN0Wre_w8EdGxnWMpb&#co^IyHZ(QsPDKr>;z+B?2)zM z20twmJSToxCX}a_?CMz3>7D(7X&EZ;_PwuT>hq4ZpHnkQ_0LF1I4mEze(+wS_UjH) zw!d#_01v-Q-?EFZI!h!s^XOP{{1Lx6!T@1K6c_qT-#Z-@p~vF+cGNqvwQDa_&@W84 zcs+Dpk+~4<1ZQ%6jj&PKuNKw)4y+M|9`8CDxP{;1^(re-?HuQszdK!-+Q-B$L1?vy z`^;anFt^ka9%|h06(+Gs*wegH`*)SVZ_0b8e5!`zGZNL#H#gFOk9SH(e2Kh#`8)_~ zXXK0O#-9YCpn+45Qx}}JXP>^U|0DOwd(zTY(`nS)Vfy>*y>^y=yUnHFi&(35$HbJN z3I~UuW-xI@mn?11O{jy_@D@~|H#{~nd~F?&{Y`;8@%e%R1z?lm2*k=ZwC}qAyBmLf z*ZH>4RLcKJfwYKj|MclR6wzR+-%U&V8B_>B!<5Q&(e(c_4DiMMi>b(9e~&(6pj$D< zZhNH@Rp>2nKxO{{>9t%Mw7XN>br(Ll$D>mh7O-Y#b(49ZrB+=OoXN6VZ6{3~PirP3aZ4;zS~ z%OFVfZNp00$KT(7U=lnqh+8i}$+AvTvKMVvgvO95FsVKEe-&z6Ko%QcpO+jeNi4Zk95} zJw2vNC(yfZExA$%eIlO~Uo7Q4ODeHw#qeyzWD!SvB$a`c4{gshXoAi&z`R2^FgJcZ zG4jUF%p`q|^kkzm6TS{Ru)vl-{XL+))ZYT!hH+q}*G3|CEoL{Wxa68?6yahKULkA- zfmc>wHA*AgTgHuL+hvN4Sug&Y)%r)0{3=z1i#V@Zaup ztaR)5Sp=m8&N0=(B2>=Y8i|whFt8HoCRVWXk=8XhItsy62{IJ1FHkl#ywMz3V88_G zQ3}ZmVXSWVq%VZFj_)ssA(xA+JN!T;^&*nVpMq45uu2jmmwrY^Et?9~0CBGePCgAg z_4JWLs(aah=vU&=2Eto{n-2A^4|smFhQN0q+5r3JtYVOMi5>zA0SX25_MIw3EcJ6y zji3_qhJ8U03K5P!a0qM>i?Juw{cKKOs!;@17$2S}WiKy9JVgYf*a@nmFEVCk<9`yE zk&_dJmpibVb(DD80oRj!#@yUoExLMfH@o>>-)L|bQYsaQcLgpYFT27YnLzg)=M+kiPh>b4BS(qq>dro2VB(` zH8q#9d0O+F#0EnS1>HQ%9zk4-Yc!*p>}v}G6n|xtb25n~ok_7gb}Z;hT-O%dKY7=I zJK>v8e>doEqOIiNiD0dx2lC9Hzk?VSdch*(2CPHBLDw<-`33JrX=wtu?o>o7(M~0i z)9mCIbg++4;o^xdK-fbh;k9Q58?LJ`;2#Re;EFU_fl2?T0*BBt4I}`x9oM zhzJ39J>j6RmnF6cuB%cB!k5O!Ed`Z+g+6UrR$0W8y-kvk3{Zx_9MMa969%h@TkSN+sF1HJ9mWrGubk=S;C_HGRa z%Tom!SzR#^5k8=^g3x!^)wu+klb zd#QFdc!wmOiAYBvLK|dEoLa=h4yOKMV<*ib?@Kn(b!DoDkdLV^FTI9mxxXD^fvf4Z78 
z`tY0RW-qgb@xQGNO?T^VCLz=3uu$tzVo!t_mcKOf1O%Wz(F++QpktNYyH_B_50dnz zJeO3%t>A7LMxkeZFi$1^fRuDdi0+tmq~-kvl2!->$VU_~+pt)*cmBKCgg6_fwU#Yj zD0g@Bfp@2OjPyV`nKDYd=1a_3FBaetFgwH)9}*9x z{{3oM#yVa3>45Id5p?+!@Fds7{N_m*Q(sG_|I z>cA=YZW|xEa`W1?Z;Y;55GaTTFIlJ80RQu4A0D=<)(#39F@sF2yu2LV0;>4#BvKI< zd}l`2?r=K2>Q!11*0vB+zRsJn14>h%EmI)Nf!4LhdEIPTOE##QNTneemtcVCNN95D zu_J@bnwzWc#x#vv)`$J^#+;B7Ic~^+qY=?L0fygnY5FT512BMfD3@8|DY!dA7I&20aY>&d2m8{{BCmH~^Bgo`tI`J_N4@ZbVvjBz$ zk)FZ0Bp4jv;*yf7IPei5S_+>72o{3CA0b3${cWaZ?BonOTpfaE@vb=nyp+&?a*n?v z`VaG`k4qv-{%`@jr^vrc%1aiT1jiJ5#(Ag$oRoO5z!}})n++HdtG#UOl!8eqcVYbP z*veMDu#c2~xB^VY!PszZkfd^e74Y#_DD6?yYw;m@mu@1wRmC~UgNQvq>#uKZjX2Dc zOrLTDCjlhbi{zcZQ2-!%z$f?o>(WvVAY0PX(vXqx-JYwjc*-GwF7W_(01mfVW?RA^ zghjGok5EpsRl(HBa`+$s9`YV|>rZ=(Mu`wp@X6!g_JUp{({KXV3CO0?@ht4mbufW^ z!%VUYr@=+22uM)&-5BTe*D#(sP~_zanCb{j6<{DDg0zBbBUbg=Apqmk5U&Mv8{EEf zY{aL(Bi9+BrdWMj;6CvydOT_#G#|};L6=;&Za*}lH~>gW1o4xBw{H-bC_V9Tfj&4Y zhdYmZOgys?AU~66SA^SZ{359j%B1Xj@tW8&9)t-0{X(||gaq6JLki9d5GaVLhVT#8 zM2+V*vc-qm{e?ea$uH1mq$3EeGV$I5G0RhQa(4blql#mETCyb zB#t8a31~avkF=OF0Q$LYRx~CcbjJJra#%;V-X06vV$S5S&N=yPajzR9v9tQo>(@8y z^Nwb!v+?mTGASe6$niDMIHH)_zkhkMnTtih_?iSI&B?5H`Y&MCQJv>vckSlQ($Z4p z&K_OaUNi5}5e^D^LWu9C;z_?xqZOmQ8btp=srkYCit((*%brf77xB|WG9giE&xMr> z$wr1nHMrrkI)k4HH%yqkrjlFoqdU$i=^5H~oJJY4Kj3io7BLp+qdP#`5Z?$V1OSUh z;{|Z@L19k)JZtV;27)h-u16FP9Tz9&Q%pqZZM-B|u>!8b;`d7ZO<> z-eu_?{SRvC!r-5gFG5WAz!6o2N|1EVI(~JGRjU`I&Ffo`5#PD994XF7^y0y{Ui=rDR%pJ9@hndf zId8lkfi%G{2SIv9Y+JhDF*O;mc5ppYz8C5nJ90=hlShw)Xn=!El-(QPhPKlbspPJ% zt`rJkiV{P8beulP$zl=n*ndq<^#;s(BnDmJQ;3&I+A$o4C`k8|!)Yt3efuM7WH~BX zHO*oV#T)a$a^Sj)Snz5hQ@!do+aBSDjb%6JcLPwO_!&VQ7$9k3+8 zOAbMK=lL~uFIlp^R2}`v!{5z5eB*74AGWfO**uBK%^ds&SDeEnL4mRZvk&0}ufgpw z@9_ui_IPVPkp=F;3FgZn#wQ>_hMU=XUA`QgZMIhljk8~2hvwPb19+gTA6ROVsP-Z@ zYa3*>X?lJ+;~dtx+^K9*hrZUUc4~nOIFm$;;SveRAcldYxqyeg|B2@k*ORM>!w<<$ z1BEknPlxypm2v1Gq+v1e#R4Q>(w~7xpZWq!WQaM1us_%sv!8=)JZX!kcB-nXsw;gs zS(QAiLmPWf*xJIOWcD&e4+E*_1p3#QveYIe4J5}>R<2wLkH4zron4ytV@k>`MJc*S 
zdFiTZR%P>bnmea3+whR!=`&!hM+qGD$cjnpnyp6o{p z79JTnO-4p0`eN;|nC8Bpvw-v*@tb^5H6jN`=W(d+3(!o7Q3<;CB~WgF(saN*(G5J5 zyTNzCXuY@n;kUj%hmT*Z6On*UBA(EDT@Opv&lIA31bu8*bEMcoWU|fs)%eZ9Eviuu zqo#OD9K-<#=FiUcS7+;Ak|^yp^2|RziMVTz!Dov}5;_tGalD-aYkIe6^kMoQu2!WR z(kkq=kC9?YrWCY0#bE#J);l==AbmSw9K}7$&T8%!^6w`lARs(P=j3Fk+tl(TwLdAG-+_$Pr_FT&Dox$ zj_h~-^}k=cxY3btJOs$INVeRornUo5{Q>j@1_lOqpzgHQkAnzdzp3eZhS~L(Re!Bn zwTeGQkaN$Ph~xxhu8aH9kX@fsOcOr){D{5;9wv8i75W}&;B@uSYNw3-jAc{ z^K5HaBor7TLaa=MVTmw@)HCoRpQmlpWUCcV?%75k3m*#FEt~ z8#&pjTFg1;6bJWdP{pMZGxK|uBP@O5~2tY+PLO9t3%3wNA-N9R_c2 zDxox8g9rv2C1QNJa%HbsUFWFmc3FEW7_=eFwk{y5Vd#|tk^e=+k$a_HS@a4^yRrk~ zwAqxW7=1`I+B$d^#^opWnjK_WnVXlas%>QX?TEq2hoGs~g-~=&9r#51IS|Op+t$%? zGKAs5LxHctj9ty}1DKLO-mSem>y7q0C;Q&am-IyOZ#|&+Wmq?62x4O30D}p&lB=I` zba#ZO0rO|ik-6FSCLhR8gmsHL?3;x*40(Y^@O+WXWy*4#PY{^64u8A!au8`PsivW+ zgdz&H7V)^j;kZ#S0`DJTNA1*EVqsx{V+N6vDjm2YPlJsH5U#eGp@_&xJbry{B|{0W%oZ-#?K zIs+A#Ear@jmtnR_)+tAogN>mt_L$_CVIFF5y_vO;tup!GX|K^k-6ST*)TzXe53V8Y zNSKGK%5GGix{OV>yHcnIX7|hS_UJG{MGr-}Uy2P7>bEYUBG!Gmzmq!nI z*_UF&hmPSH=3AKVuQ_DY$c-EsoH3;shTFe<;e2d-5e>{y^nC*TCCS^BXfK~NS3M}K zo`I?i*9`0mL$Sdjlg37tZu|pJEQPKyem!(sR8&AFT|se%+(M@&-AHcg4j4pi@?yAh z&6*Yaa=5qUDoDVZ=xY)onv*@dV+TdjtngMq0F*?tyqnFQ$*10g@aQsf`C-<0H0lqfT5%OD-Ly zhL;D{fcqhyeu4OzWrBiYNDPA!M*R?8xFC3Z-o3o0UP-5dcaU`n*BxZ;d&i!D7Q_XQ znvCi~vh!-zM+;@ce&Rm1K6KgE%S0d1+IkyYCN1K9d$_3k>3mYgq_?Je${la|8ES%< zHQSo_S?R`)FGIC-eb{}8wUN?%o<)ln*CPM-d2{m()Y7#uL&ZrOpyr0do|i^W`_;`1 zeJ;=PM!fE`D1tayS2LknT?6v5$bvAXb)zd5jQyOP^6Ae5#%~7wYl^{zuT#t_nsxEd zis0vjk;K`4^LNnu?_X-WTEMLA#qaTB!b%1`$$<&9=tqygz8XYknB>Fb+x{~(O{lO6qEU6w#SsLEh7y~^N(HV)sQ`WlF&IdEF6VYU$r;lm`-6b~gbX3v^+)Pd@vgYgaf zYMkSxX!TJ9YQbAYy#(ELx5(M*pPABne8k2q{KcA zUtVc!N9!`pgyGeUEN@KZX7S)leJAsNt%o}hNqNYHIsB%=X^3^LqT0Vr%s{}0(iYW2zeL{`@o|Nge zj{fkqQuD9{4TIKb+rJH?mR^j(3S9LPUybC^z<0UC_WSxA_y+8nfbxpSZgGuUduMnf z^qd1uy65mm;#DLaan!^mOBNQm;#ds(VRL-YPT>YayKUsU?qtGCGqC+`Y_d5I&oHhL zU?FqRP(8yI_4@Z2x}FR*sQSmzZfQnShggjlSA5BZd#}-h%&chYy)NxTf58{G4t;(5 
z8M0P~(X-(XDO4Hw`s~KfOGMK`Dhn)d?xfWB5Ru}PJ8*z|3%8(MSha}atZRN~6&AiH zcK=(5A=k>tM4+Ki$1IK&&Fbd}V^hO|CK05<`IMJfKPKZu3TwzP(L}!$2X#RZU?^qH zE9=hUal21O)u}t6bC69B+Ibc+kC3rr1N{Hg^(Ej`@9V#-StA-G4WgaO*hGb_GL|tS zGZ8Y>R#GG^6;YC8Olg^iWr|QFREB6UQyLU0L(1H;SZm$Sw|)NSo_o)|o^zi4?0PoK z@AnJ!&9=@UQciEFsB@nmRg0lTdRm8^PPcOcfUme6nOxW*#`c zifZfr<82tpw=Nt*6RHo^m(w9xf>IyxzM z_?L^>t+il3YHX*+1-S*6QE9L@ig|ut*ck( zlOy^KovH`%ED15XT$Go3bXZvB@Oe(=VCiu6>g9rh2hg*Q;ZANuB;$Tq53-yJt@fZJ z*oO=C$iZV*Lk)&C7&HV_y}+C)n9X}ZqL9GKMqDUCL9e-0U1+Wx&-wZ_q` zN=~d)>ExJ$_JYzr94+1<2VAqn`EN3+RjBu>48?~SWelcf0mSIx z?6Fl;1Ga>$@1jPo!FPHoxA0idq}2B9w{Xd+4K$wP=$q1Pnnl6hkjB*+Mcli$Zx7-9 zVWPVAvx}e(iJ%tXauUF_!`R{-sa1&J*atrxER*U$@7-~7WG_%xQ$u%%fk$-<&_{!oyvZD;oviE>DvQ%FZsj(~>+ zXMcT)Bi9n-^(kB&YEEce4FMSD7Zs%fTV60iRK^?Qw;a^f6$9enu$By1w=8iN!J$P) zfIZ=Iro`Rybb=9V7`xSGCVn3z(yj#3eqBc(YUl z5jMIhO%JKWZaX%Y%rV2F2yl-Y-cSt|*9noPs$;gZlCT``&&1X8?wi9onrKHINgCh) z2@O==qcxQo4!VLmj}y-%jt;lmbyBkXtCy#W}$ zot>R3&XKm*O`A6#un+eE^xKlCwlGrBVAx1o+MdiQGyn@hOo}M1Yinu(v3cqxX{M-| zo4_9!HJNlJnM~XC(a|*M!)3d(ZV^9@>EEB9&YDSJ0)yH#sj_R4;8!$(c>w{I7m-5R zA-a}6G(yh~Y7xXBq=oUfnPb1O=;^7xOyOXT!DwiV?~1Ve^0FG2{&Ms?hf!P=x^~_J zk*$v4%oLDMhxYCBt4j?inmG8*xIg*GO15lRbhHMg7vU(%9XM=ifx9tZ%TsAsi7h}J zvd=n=P*t+)gLXcor%a_L(7JSmL zY4yq?r-Z@jqk}5Ev`44`QOj4kiL%{pHa0fm^lQh(*A-pQtN9GP(J|Y-z0g_IBZ8Qww*jb?U zRYrz+^M()KhVAkT;>ZahiSe)3L183-4?@--N}k(K$8xjLQhdRZ6aTf3T=RHkSI|9d zi~REIS13qU#Q2c0+XPR;Yow*vM=1kgGV`|zq& zpaYs+{C2v%^!yq3-i8_A>iL?G!LJVz1ylbF&$y&)la)nM=^SiLi=OStU6~OUb)3_u zPm>9;Xg)ziUqlrX`Tk7GR~cByXJX}V%{F((Y0g&pH-IFzcQHQ|)WA_YyJR=5(?-Sr z0(O41mHAx2Cub;LS`DZKz+T4d_t-1VUKiY}ey2MP2yY=PH`8Ku$%Ly8zmU*1bR{;p zvEc}`SGv(!W~5d}jd&PqZ1ECO1C!B1bm zUW}UWIr8fo9-Tgh^sJ~1MU@cYEev^IlkUs;Qjzu4^W1?O(xa;-Y`+Vy7&k6%wgo@) z><#go)mB#e-%~uVjA_&&DBuybxAZG;hC?Vz|1VOW#|ga3o>i+>@l&AFOhfM3)t{w; z3v?9J;4KiLJR3Z)Am9>s-P%{Lu2XIz{H({T70h)N2F?F3t<(Ux=wTGxE z3vqI=+>v0Nf`S5Pm)|dy>u8MaN)M4%Vxezyn@c=}ku~#i5bFUuirW7^gBSK#5i)HO zjg?O9&Z}6%Mqdp)%nno;Z`%{h#!OnZ+{y#B)^ov~ucx?UymO>`6m#U>%Kn>HGE7@Kpo`qF~g<9n@s=muj;hvAXo%(;Qx 
zn|dzopHsF|K`MC~mYuu6FPxCpW@uob{Lr;i3}IO__u=wxa`G*sag}Doqg<1H?=(tP z$*QmHsF&xUHqkxX--4O)68KE|66UK}H-y_1q$;aJO{Pq{FBgsKed2Zr+WLl%DvT9)Uopdfvz|lK8@fxe|f3YmoA` z3Erkz6wrLTk=jRq;NP7zVKg<1KY0yEwangrTU$yBiYWpX4Sk-0HtjNKSz>3B|9Em3 zSJon-ojl+(@jNp{jB>`o{2gF+*3~V5{$(}=q;@=d0&r4C6vP0Plp$6s@+A^t`1r}U zCwXN(_EcLOEa{~$G*+&o`d)|1YrgM-(xG2Ca1_0n#8T>2n2W2@s7cG1Bx#^uuth-# z5lscEN5ijRvB5_RLn4){>Sb5q+K+OmVT}c{JGkfZTl<91OtaS1UvU~xgY0j_UUku=$?Gsk%uMyAfo7Awzk^v^Xhd^>2FAyo9Hi+}bk+Cwa zt}T)=fhMhKwu|>odG1$s{f(T_JIf`%3v05C@sR2+E z9(>J^X1klXF2&F5a|zTq$V5#)_W@gew9YW{f3yH31r8^m;Of=SD=TM1#PI0cpb&~c zhSBjY;w)FA_rLRz+(`gAGV=r_+q3p|V^4pIC;tcZTbaXeAy^=-Hbe;&tLrpiMQ59p zo0*O3&{q-08GYLt@Irp*n-I(v0tuQ{&oJK>_PAOWyY(u9DEwB2t|#8Vir|`g5%LGg zdEQqnYY_Y+YFak(bqYNA(9n<{PC4B>y15GsI2+`Ykf7oRsET9~ zkbFlFkXuPdt!O(68PhVN2SnRgfdCyGAsG_^2|0N8wqIX_2_jR8XdcO)MKhxYX0v5F zLB7m;qXP{`60v*3RSkulGH_sW<|8@s%0bTdLpL5^H`;74Ume?(WH{Z}MkQoyK|v*s z7S(~4UA`zTmy_|*;E=&iw0?WcNW6X;oiblj#OlkMNbOei+zKl{W5jd>b&$nC+FWF& zNMApElD)y6>N9>U(q(|w#>rlsW%a~t;6p;TkHJK%yL5u8cB+3B75b~wu*`k{fC3x@ zQv)vI;Q}Ft%4DwJN#7$y3HjEB+RXyOW2W@HitQX=gLUWEEyRJD!O3Rs4*F*qHk{*R zyk!08e9MhZubUQ|J1`ljoN$Q!wGTW@09VGcWHuUq5Wh(%C$?_ewjY5m>vO%M14aZl z5@H^KuBT5orrV)}yA~Xr*`WBG&{KfNvo|dUg@eC;MOVe#<)?UnjRj7Jv&Um1+w5J=Gk9CJjgN`{D&kh7|UTX@mkAh#10N>Q@n6X?I+SeT!G z2_JHpi;|aY-;YmQQ+#Ar17t(ILWW|vj5;D-iJut>z(A};z~NxwJ|huAWoPy~Pa9UE zBtS3%^qBk5z>*R1BrO4d2yeC5fc5(f^M}%KR$fHeJ>LninE&I)3Lra-Mm`}DxaV+c zMeuRyNCmDYiisc|phmZPF8mvsZ*3^)(6Hx*3DKcp0QL41^(IsYv-h2qJ{HF%hX={k zN2ICXHe3Ivx-7;lge?Nqn}6nhNJvPXMKtD2-?A1jyH;!3odm%%^uCvgm=nj=?o*%r zbVvHDkvWQm23u{!t4uBLmLt*mG$=UFM2JR zWDLS`#qu<~kQN3)7?3j&@`#*7AQmF(TA4t$c8k!9IAQtpGFG2xyMGSbJluVtP3OD9 z-Q*uG|L_x$qZdgt_{%HoQY+POj5hiY~jT4bVa zB2fmw+-G&txKGUX=|Mq@+l2@s{6mu64vfeg$Uhfh z6QkxcK`gbj#Usk+DoPgf#jElaAOxVAu!RYG>#kiI_(o*h42esDNMJ4nm=wRY&-e<6 z!zkE6v**n_HuU-FtFxuxDTw?@Oz+-x48!8Z>LKwM1ilgpMQee<*@(sh-s`Uu22m%; z_Hb-WoP)CQK4#ot7|5rigBRYBT&TvzMzVlGAa-C-L7rho&u@lN8BoEYoaShPA4V){V}E7rIBon6W;Rw7NHTGBMcQ6F7PjJ 
z76431o&5EBSEniv8p0&yQy7b9&z^l0egyE{&p?duK@*R@q>Kp){*HHWMmI+yXJJ4vz2<=D(j+O_6TAg8XgP}B(EUM$Rs~dM$Mip>ox2fEqJ9E1M zyWWjcJ7q28^KIAXDs?0K^AKI$ymd>|i<2-h^L&q%G>X|?h+ugM1Ol~NFD&)(Sg#9Y zo-zk=%SmT~8w4ipCglBOPKdsNe$=>17>MoO0mQnFT~b*q1N=D|sdQEyT9sf(lDX*@#Flv!3ukR#C2e<5=wm(L})Fcf5Ukt{xZ- zb{pbaC-3-&iPM)r4^p-hLhsb!4QigAkKz~dL&8rVigOiU17%20y#Q@kYky;p+TFT| z@V|#_ca54^VuxF#bWDiL+ z!S%9VOiZjL`QS3MVz-Tv(b22R@?=&Ckc$IE0fElk&YcrO#0ANX)trusbOdq12%}7TW3r~yf%8UVefmSgA&nOP( z3eLl;h$`!Y%IDQcv>J2)vaX*jHim3e(yT&dAR;P{xeJq1Q{lzM2)veW!m_*iV(ai> zy57&c2x|)qZQz0HkqV5~FcGXTvM5DG4bJn^ZvifJ^5&;6k7a3GmHP*MlO*VyCSySD zt}Q#woBz0dvQ?#No{#OsM$wZ0Vh?jBlvR8>xX*d1hr-2lUbM7Cm=&Ek2pNoV*n>Cq z2sRj_Vc50VK;u^$E2m!k8LReXU5`YVS6H#2(ORl%7d!JMGgp)Q;O_sh9{^HAMr6vE z$|Ix^+@84d_nEo>R!z?OL(Q)FNIWyBq%c72QtXj{!;AU&$r*yM1_I&BW-m!wT1wm9 zss{f93bkv&D`lLY%<`Y=q^|tLExg<@)gblf`{a^Ymt+zSs*ad)&&u=TXcmwzam=@U z#rK|P;P)(n`PkxYNDR$7#hv>8Y^wZR#jk?TM5m;|i(bSJ?5`~jd$UfvH~*p|GlZU{ z&Ivqho-y6`v<^LxVb2-=fANl7cP1++iT4AI^Rd*KUBBzU7|Q?REIImj1c1~X_qvCK zbRnG4GW(NgD-9;Tv!6H4`$@fW@qOC4XY4!A8`ubUW{TjdD^uJVN%Nw(% z7cN=iz=|3GwnwIO5m=bA88ajYrx=khIf%WOO-R$)=H_s$uyo7^D8OZfgtTMQ^q^i~ zW@L|`sW2M{A}t0$rU|f|VUuauu^eqwe~Dnd(Vq&2pTtpm06He;K2*@DU^^oVa33vL zwCFF2Q2F8P5R_sKPyiDS1joQ1X0AN%wsb;pjwJqj;yi<(V&f!qX3p^W=g zxdZ%FEpm?0`fo};_|BTn;cwPsur;xP$!@@jM~8Q6Ajv7yrV~?%1&KkIIazLexRn_)8%W;63F;0*1NkN7WvnkDciwP^u zmaR^?51BTS9m&jNaGCp|Hw@@RWbHh11uNse+=k@G?DHr$B_i&?^@qNBe!j3Ta|YfM z8dg4-;VzSGU*v2xW_UwH^JgxK&v;~;A|Z3}P2vx2Hf)NN<5{vqpricUpjo6MU>B6X zm55Bnl^%#exFl@dY`~xi6cI)PEyh7E(1bqN-_a*S zz1d#;1JEuA?(Wfg>dGbz4AdfG))a)24Kz~??iX<3`f_}GUj$yP(}>zB+)f~MI7^-b zFNZ_%8N4B9n{43qA|HG6a)&n7VkXu)~201 z4eh56dzjyCiTqw`c+Y%RE`irU`yI_GKaZ{u$9(wl* z$RkgUX00h%aNf&Y{#iE$9FZQnB*)0eWDIM^U?RJgx%r#*6%*l>oe%|>z(z&9TewF^ z5))3gBQK3xC!-Iudli%_tA(|u8w9va*6mz+Rr=zA<0`M{27}SJg0pFrV>?==Hx&9d z7b)3gck*F@uxs);;bwG~VC$^zOh(TG(N=05tF>rQBSc6Ifz~!1B8~nd&VH$JU3j?P z+;IC~-jrXl_eN_PTek4TI|G+uRQ?4>P|F?x+Zi&*`wtEF;dy&*db%YME>0*;(i zoL@`B{#K|`RRxH-F+h1gj_on#@G&=c_heKYnLXk8G@~gEZED7Z#3V-HIidSnjva*q 
zWMz}AWrp|fj~+PX4rktr)Ze^(3}Ht+D8^fF?Jc33DKGV^91CUok zk@km3Wx*Cjh*ZNShCCN?52F!r3p`seg?IYfbM;`7+Ii~pz7_1sAI}Yx8iUJj!}Wr^ z`EW=XOwG-kQEkWk*kn%=sJtN~E}jT4%1yMPsK^@8?gU7jhM=Sc3=FZ<0e%PEvqsjj z!4E}Vh5(fz#I35Pw=j=h{iXS51)v9d^xjYVbQ_ao@b_GCp_i_uYeu(0UBtgl= z{PCb()kAgRf(7nnv9rk}YO-aqm1U!DBPkuG)wq}0*FjI9J0QHc*JZ z*drMn#(JZ&BF-jJGAkYSxdV~?ks~w}R+aMo;%u1|vad2X_gEX;e=GBUTVg|SB_~|} zZ;xu^?oa@n*9b;_=tRAH`5rTN`zn*xEWzQ zAw&9Wa2dE=7%3O_L3aVbxn$@9`)hRBSNNqf}qu(6ZMm|Y7I zZEcIk*o!1)2nylR^P>1A-<4H*a3i;FSm$cdMBa7kw&WYpBYHjq?H<0z)>1VK-mPqq zQDRL_I%!Ae>)|(5Wos=EnED9N2!#+)15+xhs?;`iLIBi=b_)=s?t#(q@fDm4*%_seA`3>C=HLiO04|ib{(*tV`kGQMY11c-uVM9*36#uVG_J&3 zCrBB{^2Bq(i9hS$)O`ayx2Mv0`G8L-$%8lnwNp+BD*k z9)`YdF7Uug@(pkt7C5&qz;P%(E@;a>bbk6^NGNe!`MGA((@XG>2`7>Np$Cs#)+^=K7ZHwSFf`EFgux-BQQd_h2U zfCA-P97vXG9`4lCR5zeBlD^k^=_fAQt&R=r5f2tUfq{emKjm|Q4`mKq@bHD72P#QRd_pL{&@ z5ApKk&Hj}Xzz`Vh7>e0E?tM*a$JT9|(yARFq+c`qoAw`E6$LblKcGR}L{5rU&Hkcf zj0U7G0McdwRh^54@*Jv&qp%~LVV68JD^p?%lU`UmE1#a+b=j>sQPgOhtiFUP>u_&0 zUykpKq!6}@Q+yuDI9JPg?OPGdeEZ*w)0aw3?KeX7l3|69e06`&S4{`p@0$vGRT0+e!}E3Hk<{Um|_&bdx`P!`x>e~d{|Sl zurr5A>p2}?&uoz6>^ynG`I>M^W%;$C@zI4$)?}+CRU*t8iu)7n(fo{3Ux>J{mPFc) zOO8H>@TuyGe#>ln=$$cvdqhjhvYBOON^R~IA#^|3;+-6#=uP2~MJh+jxkmijjj&|_ zGG2`GBg3E=R0o9uGCsXF!l_Q}Q4WDpK{paqoai!uH&aebz;hRfAyMd@Nfu!{Qf`-sX;O5{Px=!*7zycUCtc^`xwzqKq zp>bLS6~z+|ZEh$!Y5pY_Wiln1-FcH^YQ){<&ULxKhi4U zh$7wxlKYJ4ZoyMq>ObVu266EPz?66vEU+yzSWKBUcab0X2>j0bRNt2QC_qqc zG$ObvL;AS5OzGg@;Fq4B0FX5{WEz2xO6O#B^&Sdv7;cPPU=btnL7i%e&@*! zL!EE1lfxncfN#M=o&HMz#YiMTlj&eH8aEg*Dqg+P?#a%AjTfUIuMACrbg#jd7)Cdi zX%W|O1+xxtC!Rvu=@)@7oxW3xL3$FgP~DmR1oHmJ)seE=Vov1`nT@&5+yjM>Px9g7 z#BKO+YXj7S$h0ORA}7fm&?TYkF!GXKPJuKE+?L^hk27X^q(loaS4=DtL^H)oCw z$buO8vy3<6T9f5Zo;-Ov{Xm=ph8Y6o!-iJpZ-v#`k?GKoGNnwMJ9FfJdp<&}9Wyo` z4?*U>mbrJPcF)AY5Gt;M|G2p%Ns*O4e+xI0CzA~o{Wb&-gISylFp)4lr{)x+czugV zbm`IDk;Mab8gs=Hy=nTW(yxbb9~NN5 z$3?)OB=f@ScW{gk0P9OtzVIvu6-@-M#fSFU1rEZn? 
z%y1!Lq(+NRo(=is&7PiF<{OnN-&W6hLoX>P7oTYKV4JQG&*>bqq-(8pO&>Q^bH7cO zNRsk(T5VloP(000Tqk;tQ?+vxqty>s<%vs5Rzd?pJZTgeF}0r2e6&kdXr+g7z&*n_ zLcEbGbPPktds$G!`(K2MV=E)lZgPe7IZxPdVR*FcKIs8Gg)oI=v^iv3=`Y}67{+o* zg4zv~KPe5-ZyY}5kjFdpYzy>-^FTX9*6&9LGGFG^OwPmRZ$Ez`D{*zLTaViw{Cyxm z_-D{dp(X)m{TxO-q!lDaQ32%NcTpn}45aTy79at;&%I09fwraCgnOWGQyVGOQHk3^ ziJ<%hmDy#Y99yTjd3HYzY~s;H)kUsO%&0r_c*bX(&@bxh2y1a4Gu#Q^j_e+gkV~_z zC!GwpOu2+(mS#|t^<BT#9HrRtJZm*)kF;4Vph!2RTaXFNN_w~31;1G!chl6u^sJC$g!gR^mb;&a2tK={g zpMA>vIx|00C_+33RreeAO!PICN7`JqM&^VNw;)8gjVU$vCDUd0@@WJboY z{wkee%hld&$tBiQZfL(5b+s)g!nr&GH{!AM$-|3I4(w_hFP1YQ4M6pFs>_31v7J>i zC%a$r^}vyOUjLA@k5r5G1wd#Z*CbVt#Y(%yV1|~lWj3~t)o%I^ zqNT$Zub60O^Y7nZkE|agAsab8j<~zxWVcqko|)0#K&+0azW)dA&|F(D)S1k8M>wUL z_vN(y3T_nZ>z1$rc=$3I1DD}lZUAzs3r!tlRH^_GN$yUh;tJ|*({f***ga=b*K1)a zF!awiG3#J*l3hOWbFcWd(Vw%J9;Q)qEORa`4qBQU?FoKw-RzJWgZb6*Hw6e zYUon@A>Sm3CO}-gycLDj;=g~n0_;=;@$}Ix$0Sb~5f2@uw_5DBUq$;D{Q2|Ig!Tb7 zP&<`(_ncY75H4mf9F9MUn4Kdo7MkOm{Jq(kTgpF7s3+-zCALBdxZv{B^|x-_N;b}M zu(!VoVX=rmA7<0ttUic?%>?dvqjhKZ(hdpH+`3g>59iLUr=RD15Ui&Mq)(O8HG7N? zQ=Q)k_jF}`c_pDY7+0RLhZ_B|Gi)&OtqV1UM(^o29vs&C+q<=fGpIj0^-`QxwO3B$ z!o&}LNZ|P$9=!W0FA%H-Q+*H$A*HGp_jonO>nB8x-{(i+2kB@hWBEsuOwt3LXnXJ#B(_*To%!5f$3C}ZsN45RSxg<@bB*hC@y*otasN4d4iy%r^e=Lj zIaXGW&aig={p)?;jhE58+~HCD^A&Qhe~(mb0A=u#hf43f>;#v#HI(&|4!yCD69xsX z$3A_&tYp2a;dqh)E5}2vb6mRj&d*En{QVQcKD~U;l(=i2=-#W0&);5JaBSX(nKP5! 
zB#%6RV5giJ{QsPJ=Yfaa|c};$(`G%>go9im=ocl`NiSh=%Hfmwld{_I(~)-~}XZewptS`%2CV6(xO*WHp} z0qK2}bpnpjElbRvJRU1N@_lZc#*SV^#rWod&;5*mBbgV2kIc#zXeg1F+y8xGceTi- zE%WEioOx7EsBxRDktT9H(;5m-T>TLvxLn%&%8_ufcPBcr7Az)z?6A|&c&?VK(aSLE z-{CrYQsKGOdP>-1kLR8fk~h>ex&!$)eD3<0Q~E_^P(ae}SN#6nvL^&}#r~>ztS4Mx zUT{Fub^MLO5GMF!h^U0XMeNi%lJPZ|ZNRoLdmv&!ZaYccdKm&6ODmu4x&+bes$`| zh*((pj!T>Ls-L`!9;R`FxAfH0`G^dwLq?&FxAXXpfT?CTp3W ze=qGWttaE#7p{fH=fu{;{1=x-nkznzR(zbfZ}(Q}@t&&Srtb``==jFI&+FDK`rVeD z(AanM)$7QnU*EpIJz;(3=?u!xdkXVHf){>xU*NQTQX%QQ=kB_7R;Nxyv^6i2r_luK z??smF`1ZA;wPv2YwKb(8`OluL7c}xPbKL0{oEaA^z)K5vu`Kj!`22ISgnM9@`+%6w-=9(z`q%nj zKm^pM)=a+E)*;9DM>KO!ioW`0Su#DnM{?1#6Y8xSLbvUln2tNWYu8F8Hp|q~?)=?H zG~vV&A?eU3tQPO@OBmeW)Xb69CFcS%vKFQ0Z|{5DZ~pKUxTihc)nCe;oQCwYXAgbW z=YP=1L!U8gd3VdaIs1i|SGuQC!`%NYK(C_o3yPI9BfO2T^gmXA?6D+9$6-0m`09vo z|2pl8p)Z~`KTHHm&Z%f9S#?`#@1Cb z)z>d3xEQ-1&{anW&eqQ2=FKDRLW_p32km=+&=LyVf}dCM&se$Zp?q6Zp-ZOmbG$8^ z%p(csqos$gUstW6vTL2!+T?wH)7Q)cRdw&>OUW8kO1$Go`GUAzyq|nn6_1T&3gW23 zY6EQt`?AG!p2?_(UeS18H00g4k-22e=DpGzUOAAxt(%d-1I5+4!S~OpNNU;TT6A?s z*Bw{BVwdQTZd>E}-Xt$0Yqs%$+Q)7Yf%P=~71Fs+t_aMs%O;eeH&NOPNp8h?auKOv;jnCxaO;qt{-0Aop8K z^VD{c8Ed1ZRB~zD)p(7W8oQjomj7P&tUXs_;Pd%i7 zj}y}8YFE$Wnle7O%vmTZdRz^;`}Z_Lm1AD(_|CNWXZ2Nl8@}oK@#AXIC8DBfch@LB zN>Eu({wC4OVbqfcZ^u70@Xt*tYbhk+sZPoP@)Z(VVtj8Y`KkEYYp?Syu?jLmGR3}X z9H0~*cS!c$_!ba8XghL8C!D)!28-s-+9|Xc|ATXDwd9fuLR#K{s)DXvyQG^2uQ2V& z;KRI#+%lqP2F?`~wYr4X96p{kZcRRpv-}UMKQnF70B4M-c z{pM(qUutn$Td;t`gH>eS6)=T+=M;6n)1fSb9$=RVr=zL|Uv)fWF2uSQ#-oLld_hAS*$%o3m`9!YW!aQ^j@lG{ z9o4^7<3Hyv5rV)9Ql z%nm2IULw;E*f7SY@GW2NPn3IfQvgbyeG(yq zgjtM_gW#$f0AoU<;~HPZN~@`KeN)q`$d4c+T0y{gA`t5I@1K@iES9M!<>w@L?8DgHTf{Wxl7@JexHzNm&m`OHlP+|QIz;5ep zZ9%ycF{1&qNNf|2NJFNv3^epY$~;(rj89fMJ}oA&(og*A42A7{E@<&;G7SluCGxVi zj0?+t?Klo;AK(iJ-sK$X?}f;^w7oqc(_TIh{G>b3U^22_FM|^oqL)G2-R$dJ9oWQ# zOGX@a_h9e~M*;hSRG`8%Mj-9L0YCC~(ygx6kZ>r$MRQ5ULFTXSt}c@NoYDg<1V%q` zsrtKn4li1@{p;{Y(^b3vzDvAAKjbGyLPJ7U056J)!dlr3fKe6cX{}zstXBY6lAA&p z(K0d$wqQECd|C~*Ese7r@|TDnS_U*_CA$4Cyg0e3=uF{arjA%R9$OcD6$V~vY5@(7 
zJK>=`z#~$Lm=iU;#gkn=1TImN(h<*KFqTp(L9p2UPO1hvrwY+}N)Nz!vdA@G0<&8o zAE%3BDnrxw+Arvu9-y@iEKsz0HhUOKa|O#AHIQp);lX!&8Eh#CwU~s^t_oymGWm_a zba!u%dKn@k%htGRF@^6kKTdZOm|~HX(6tIjpaIe{JEEZE!U7d&Y*aPA`9ck^Y>%FGXA`derb(cgl1;KwUXsU7T8agx?zoAh0Jv(kv)7?W zi&DDa<<$k?WJwQ^DSl20Rl0=Z7b-BL@8-{~rXlOvDilJ5*JF->IKlk+V8nsj>)SVS z=q>qfYE4F0iKtvTuxyLi#*Lnd5S38t0>kD#Q-Ks=faz%@AuCC-rr!Bw_}A08angUa?O*&ou<>QHVpH8-D} z*s!ZO!GaT7v)(^lR(g-LbX&Wl^F)Fi?TJ+=(wi~!x8>WmCcJR(D_#@6p58tt~f^Y$&c5!uv3(%m2xA--K)aSh-|5NX$tu${z) z5@o-x?h+-$D+d;x+$_shdChby;%+XiPYTuQbC-Xdr&JQRq5JUS;_g6;$<}rBb;irc zvAUeT^f>vo(^r)j>MJObUpMI9@=WVti6dF4QlDpYy3C#K~3h5pDQW(O1zU5U*zK~L5BeZT#}6u6{G>K zwY-w(FYk_|qUni#Jso!Ds!q4BtiTO}#=x49 zo@&5g|~Cv&O}8+xG2CA=1^bx8Dc{P#(;bnPWIC zk3!tEdGqE9+(bi2h&BDFo1?mgMgjJ%?BF2g=H^DmW|F8da4?L~Q*vA(BtTX{8?+5# z?7o>74cITLa$B8=c93KWLMBiJM-mypivP%uOgi#MzTlQ%La&uQ(FuDx z$+kh&g3;0iwdy&fhb#y(-CYo= zlPDz&(aQ;|sz<~^yOa~AZ6I1UU z_k!eRJzinT7|M(p72nEZ=W`>(nhw{MJBYp!Lh-c}6&H~xGx{~Q_=ea1^{t*RgYb?3 ztK5|^|18DPn|JLC-k>eWKa8HWEX4$F680!}`d?_~0!pE-VoY3|6gYf;^`wlm&kQb* z6Cde|YNt-!$5a5~`V`1EE4*|&wlM+ZSQe4)f^8L%759!SMSt!4qsb}~8bJ4U$s`H3wi!JLwgkwa+a@g)CGOTx> z|1?SyKt7l-A&B9gIh5vJh?2JvXg`wh-e$0G7P6d$olVyK*=S=~r|0-VMca+Mt7^c{ zLVpE)-%B%q)9(tF3-KeKEh&uaHLV`rPxfWKxbf$wnfqv%g|^Ftul@ojSrSDmF_2&d zHWFjglE8HRFHF)9B_7A;D{H>dYbu94|94~fh?(uTl}?82RU-X~sPk?v@0-90#o)8c RK2h*>P)%1gNBQWb{{_V`YY_kd literal 0 HcmV?d00001 diff --git a/docs/multitenant/ords-based/images/makesecrets_1_1.png b/docs/multitenant/ords-based/images/makesecrets_1_1.png new file mode 100644 index 0000000000000000000000000000000000000000..f0f6f21569bfa17aab7bd37827d97aa041bb06ac GIT binary patch literal 117953 zcmd432Rzk%*gt-xLI^1&5tY&~G9!)^C8c2{Ga<^(9*68CNs^2cl9iRc_uggiP4*tg zanA2Lsr$a4`@Wz5^Zfq5*Kc^8SG^8re7~P@UGMAteqSG-+qYyX4=^4;AP|(-uSwrU zAokKC5X3~J`{6f*D6ACxx7Skgx*{q3IFdf{hUZ70T~&Q{&s6`JjgEyL!obATSnr~x zu7#eSiREL{XA8u|VhF?u#C2&2Mcc61ew5jM`BX7WbAGGLq;+Z(>yM6 ziZ4rC_2l^iPMSylFf5ccI&f|zE^?r)&p|g&*^i&jlIoF0FD80_8}s~CizNZRJJU+8 
zFMNo<(Y{?El{j#UQHmkRb!X?~I7gtXU4+hlAB=O-_;f@1Wcr2yYW=lRmj8Buz|*c1 z1NJi)UvEz|qy1M`pFQo#F#K|lmF?#bBmWyT8lB^`y{^B85~<3zVAHnAwG17`hk*1rKqTCSF3B>AXdgC6laR}M16^45Ljn9e{M|c z)PDTZC5Ex)YmAh1xV4mPv(sO<^~xQVADtq1&B1Tr$DWM($($l58eDKzQJQBgWd82^ zB6X_c;swkIdBY|ePs^yKrL?u}?p41fwlf~=os>}aDdvWf-}&ZYkwJ+zQZQoL+U{*Y@?`Rglk!Q24MZ z5^0u}&gf*V;sq3og0k{RRjJ$Nqk1~qtu5A7S)>I=_N&qARxD=5x)bAoix4jp`?*YEOq+}KI}5Lv}|Aoq?uj&Inq$dJH=a+MH{|a zLtNe63EMf?5#OB7R@a3K8ZQ>`+r^_6GW&&{@eWF9cf6>|pY#c-Yvvxxnm5^7Q7wh{ z^P@IcyK@hZVZkD@pBQn^I}Dc7Zm&yxGn$(aRLA23LVcfz*z7?tqjszw7Zfh})b2&3 z?#v->^Ka1-77$Hqm!%=Ej{071yAVM(+~vcXV)h7^sRcKa0zn+>N$SC!NwJTE`&WBU ztaYEjY_noq6TL94$_agVBzD$dGb)c!qcXu&K?McW7%u$tL=o$ubo>emf#`Hz=at_c=UQUP z&4qU*U~+ZShJ05h%~#y0KDN#*bv;Rm?~!lW#IT*cd^D4cY;&Y+AEG3fo=e!V>Aq82 z8@_36W^=mkh@;i`0g6Gqm?VB&YmxrQ&V$g<@y@JJ!N}H`Ik8v))UQ3}FioZ1hDdkj zr}0m6mLIr=KyZZ%J}VmixU%NaBukRiL9VHJh)?rSK>>m*sm|@_X)@Ztz*h5-3ewM! z12z#(bZ8ajkrgD)yapxHqNeCxo2Vic-AP3wdS%C^n0(HiEJt*TeyA6P<1L`r|8jd^ zL+0&_!I2}d0;B}ZqecuW3mvAi)o3=O1`?WF*;D_+*a7(JAH8{uDi`YYqV}>hF#tz7|k;8jEV1g>v{NH zklx0^657b`n4DX)1^6Skqx~1r-@a=wb~=1Jg%Dqu zMAvuj*QCnGou1diA3!XUk=?<5nVOLr#jM%RwyP{DTJnCTKf?NW>7iFMr3y>t8d~GN z#KaJlznoC=5qn|l1XPrUXbsp>n4>_ zIo2P?{iM8F?kjs42sP`KvJ=NUvQh84MQ!X;nOB<@vOf1bzs;LgFh;_c(YH*V0ZEZ|>F;4xaQ8`)BHUWxua7+8@@dK$-Z zUv-ElLK+gMd53IUZ zJ<*6liv4@p*+(qwg*h8Oc#x;-4=*F}ynE;`xO5B-&ZqBe%nw6|BHivpDfJg;<{r)B zDssWkb}#ee&!DBXYy|DawjN1eR*=6TmEC>8R57Q{C{28M`MG|d&^T4T8j97-X+cwZ zzK1`muZ@8dg>JdM>F#;(SoOMWe)!KzltEkXVAiI^x~ZOp<$B;VYRL1m!0x=-n%m0j#B7qrgBc#@2>1OG~C(_ zC!#$goPlcZ^7g^6Do}6CN3Njorrt$j2=V4K=#R}tl@)D@eP0s3`fg>!k*+W@Do%1@ zFlM9K#g#~GF#dT&q6#Ypv)I)X{&;)NbS`3yTOCKdN<-5!we~*a8An-1Qu+8xiuTJD z;iA3@gLWiZ&Z{&TIGioE7;ocvU&U)c!bXNd1&tH2QEW%0u1CxA%ry!wg&Ux+8r%xgYLFgB5F!QNhY`Q8-)lJpQGH1P;tDSlVcjj$ux975tAkSi4t2)1I;s*>XD?HR3 zC*zkG@S<9_KTdrtrYh_3ElRO(waP+O)DphYmxE(-pc(b5g$n6DSI^aIwfw2yV9%jL z+_A|`^J=i~9`9tMh!J~ZV^3}#^EwlOzD!F9sY&Su<9>UGW++`AC@I0M#h2?#Hlwg4 
zCQ3@IX02@PTd1#2A`o#JE^vCDy1qH%L;JWVPe}Doe~0dne=8^i&A%gJ-h;%9##xX>wD8yiGBHYf0bBy3S>LxNA}CfeE~@5Z{N zb|VnAoXq-iKSNs+rJers)NTSk(XrNx`L5kc-j!F3`);~V@R0Xo`9@qZp5A=28RbNB zNRWDW%{9KZVW*}MmA5tciF^VK;+;Pb|4d=<58^S&L9MaDzF*G+7(Z+bIGBF1Jh2<^ zqe#3z$HrFch99Naf9=iQ>u*}Rx~9^cHP%+pc&6IgT3rEL^sBWq{09yk;I^4p-`a_~ z>YM7YG0)SmyC>V;G|N9WHVzWpgT-P~wM+MeU9tNX)0CH=ual|PFBg}UMfWDuOQI?4 zGHrZ(yvf>9>aw&`Q0lKRP9-=e$W^dCpHF)5U{!74nI-H>b97D)16RRfnd+Fp!{#e1 z6cKh4A!)Zp11d4QX<4+7DPn?}hQ>7(?aXLew8h?$^uPSSSY31Fv7r7wN? z;2;Y*-Gx+NJY2?718=iB$hkLV()1PP++VfjjSfuLJ)9u~ApY6m6#MP@;nd4LL;^g|0iVwwX z(w@0>0zP-Y8YhGjNU;CMnEIhu;N_~!rDSaw1n%*o%jF|1o zB-hi^6SAIVFfuZtrlxjjXpnDryX@=#@})cV9)5m)1cG(1J9S)K-0dS&h#x_$(X+)-0|A!0Kx^zxp%It2?03m-rK@VA&7 z+>CyI3?yUINPb>ks^NEb^#WU(prD|*loVNK=c2xWZ{MEV4A_y}i@jAjQ0TBU?8Ool z8A&wRj3)V5AZ9deIaWi%B<9Ev9v;rkK+#oTmp+Ys+cDUWM)LP*uo$c~H#T0Cl_g0? zOw_p);)&flclK+cS6Fs+@1BgUdcD7T01(}wbIp+;O`dWFBs&F9dF+BL!2nO zZd^buS+G=fjE#e%Hq!sor&CL#EO=FC{CcmcuieT-t(}8|gD>1p?HpI(>YMkx2Ct-$ z<+b(oggqsd6<>>*Yzk*%XRrFf5E2r?CM;|(y+h99=oGYUQ+aJV#WQiqi;9Krd#nPz zB$H!LmWIh?+fB~Zy4!@Jp5o(YG;B6!`alBA}$u@dzD{aNK?b&lecgoHzkelmS| zR!^k2%y)bUQ9{OFhLmSkrhLc^bAgT=)_NMmi7jPo6bePks-32KSny(%uye^=7qUosur<=;=8($c9|3 z4zk==C9D~ph7Ug8BycY*lkzkt=fTFt#u`{|@ai{j-k43bu=-C>A#gZcc~>1`G|y8Y3uq|vS3i;HI2 z7{)rY74#*&1J8tye}Lj;J@TM7lX@Twimja;Py~|Z>d8n#nrq#OIVz-X?o#=%)QiR0 z`{IMFk8yFNDTV8dh?4qxa`r&7P(f1}Lqm>a&B7PU%NCzvV|#7lTD3dA+!rz%LiF_X zjP<^5uTlsXKBAUHcji`@xPpSeui-AKV%tNIl3lQR2_aUsI~X_nLhBbe)uf zqJo6Q5rN?LLl7H>8-SVkd%%U&qgSm98UmaJ?wK(nQfSwCIXO9|k@~v2+Yl_%yI*3lj&M)q&CLpb8Iqj|4|**}n$c?i zYM83U?Lj8*~*OE3eO2AeS#n{3W6=TP_mbYDdqaUcLDehdH<#JvkC#(r` zb8|vW??~40ot`$T%pn@B@++T7DSo9OwzU_wyDe^O65_04Czp^A18TFJMf+07;#i=< z!!ma=)pUKo>81XpU#U|BHY8zW{Ei8Z1gU72Lc~ZlAFX0F9cOuUcc-n*pg+&bcBN5J zM<6ud{d;dM+;VWj$~`qyCK9DEORI;Z=51(h+WKmc+6bY3E~%z6+%s~bZFc}xecsOMW#dL2A2siQNj8hdrOhFwi6 zYjl?=!c3nBmWzzecVA>=WY-UtAa)M#Ka_ z-|ycevBOl=!d|9H&!6vU2p3UtE`Hp~HVXlif_M3gX_JCaskY0h>#t9d`T6-pzYh^k 
zp7?p8($W!;xsF@Qs*^)+dV18Zz2O_#?CMfwLOL_W#Kd^NdbO^tzfr9^G1%Wvnb&ur5dr3&jwYdoh$I8me!r@<0nTA(; zd;2wcdH0kQE+OjuC(oQAd64x~LRm#yNd(nrwy5@5|nXJgb*RK(Un*L*BkG!_dn=7OL4P^C4bPn}} zH!3t@gh%Hw9DXVKPJ|HX?frPQ+nD|Ekt2~`zaEOc6)x%IB)Yu3+|t>3CVS!EAW?4N zzoHciwZEg47dx^tGLodOA3iW5T&VX*k}4`H=I{--wA_Blf@FrRAf>AMUgLp@38%2I z@NEqZ%D`IpvNB1pO_<21U8zKwnVC5b8=PO$)0O;_6w%4aoP@W90) zLBD2`=uhOpDh;rNAuK%>GJoXvAJSK^dN>-lCn!Ax6u-O;U^mTTTyDz=KCI<2-w43( zgsR*3(4_}i#4dbRKqq5Xn$9*V81E!0Cn8QHtLHxKEY6>aJ^aU5PvKJYd08QT!FQL> zcBcmf2YUfFsW*l#(BI#$nyT&U=$ltujC7|?RlR_4!C>r9ojP@PUX!A4TI~G?ob0_D zH^}WB9B7UlIjwbU+NJy`uYOHRtOEKK4(F_1?5t6g^|0~)WDc#ze=sT?oHg2M9L zy?2l1@L?!%Qtj7_{C9XsVzzQ0;9Fm*O*7=O@4g3H0!ilI<=o!%?0#SoB^uxhd2FK19WA9AHG8}2D`+SV2bg#eTW z;WN0(`Cl@3zVN=-w-b{lJF+609KUH8rw;~br;d5a+qbD94Y{3^eWeY->?{`-2?8#z zM4S6#LV{6O6CgLU_1XJd!B17`Jd_d{DdF7T z)kVt1#RYG#=`1TN)8=E;;bWv>W;UUp`632)27_DE+$?KmmRhblfA<6W{95OaX9xYg zpV{oZ9cfRd`nA!;ZiSO_NvmFLID_rY?75Eq@1aTq|v zRXe-O-@bnz-&~vNBxHFJkz< zm6EU^5Doh2*tfphJjl$o&c}T3`Jd1oWD;`8$`a@+u=^gfIozSZp*e@y4V0};P_=Qf zd^B4elAxibJqcO$D%^cdUELw&L?x5#?x!Q|Dq2T|hlk6mt7YXw1#SXF?w_ajB&X-} z_8_M}P^ihpt`KoSo;^^*L0|uK7nGN;Nrd8lXpyq6&-b5~l$6}-qC@qP$>-Ipg8}c} zv9sR_J3BHuT3OFkJZpx~y>;uAx4%EVn3x#b>C;y;va|E~Jkv1{5WLSs*h&KKAz*zU zjeKjL)YQ~%0l1}<t*6ZKAM|B|{V>iv;zIz9C zNRL}c{OASz{JaT-l57l1{gxdY%BC}T_j+W|6LZ;v%ri{CE^mq`%5I@>V@&YhD(&3D zloTov>sj$@*GM5Vq&jX{^yQeBCzk`LIK|Fh*4ar<$e&rpp&HxM?TG<_fo=f&PH=Nm z6A=-eJ$LTzY;k-%*_%_W0J;%ly+6Kx`*ub7CB)MU_rLD7FJw+jOUtpI>v@g7e(joz zW{RwZ#n*DI*QgIRsH0j#Zbc;>*OZfABsZFPAGyn%SfGJ8k#)YOsOVl-Cl@Acbzp66 ztp*BSEiH6k(UVTjw?RR}U1_>;@$nMk;)qMUyyNRot$+ITNqeFpBrYlGDj?ALzT8pn z$L8idj*gBkZEfy1f;mec^PO8~IeL_gbuX0Q2!tdlDeLV!cZR>YOG!!bIAd*J9uqvM zqoZSOoSU0FH8mw}WyLSgPJ4iy{P+m@fdi>J<@?(@Iz}KD*EBUL^CzEKR#9V#+2XkK zq4*o-yt$22P?bu$^X@7bBl4x^q9cY>M?i86FBXGi*@9GhOfN{Te?b_|W5{8^0CvN^7|IfRn8r!*Zazz4dM4r6*4X?En z&|=4Tc6M@{ck&#KD_=0^tdasEI zeSo`Bg@wnjU%xK&Z0fa!_>(7zj^vmU78RD_$%O^v)6g>*8!zR=AgviFmDWoY7^x+N z)O)i{SsL_t;jz9jxX{|y_QgN{?Zm=7=B0JP-CqRQn3$KW)c$?@bQ(YK3t5bj61Xg= 
zND#0c_fqU9PDn`boV#%D+yR5;h#*w0wZ{h>QAuy_F@$z{w>ooq2*Faw599P(7OPM0$zW=0hs$ZJvO!2CGC}C|az{tpGwls3}X+JQlY(^Lo7od0~ z6Wd2Bd}ybB{E#p;P00uT2xzEBj~;0^M~DJL^4Q2o^7?h3^2xe?sa^={FV#%hQooH2%52oEN&UJ(s=BaA3-@X z+r!JtTUq6I%t#7(>;X`0P~Ce5KkYzW&`@v{GJe82MQE)H;R*<-!~*WXCrz5B*d z!_cWOh7XlF!>f*)Pfoe}=Snzw*U_;c$85Na!PM{_$ zTi&dmwS}zk>v|qP1$f<;Z$owe{(Yc&JxjI49UZSsPfw4n7Zr)kcBK)sva*6>p=5cy z9tt|j{DX;!iBQ6k0>9(#>G=?7RqMG#l@ys$)_F4x_IJ7uiJ~cb*UaDO((HJ_O)z_!~?Af!Ad3lULY5==> za0w>5D^-UWva$Kbyv8E~gT$seow(w=PT_YFQodn>azv0ZOYM%x-HBm70BP@`xTa>r zwVOAONTvP3SZ?*q^=5mVx*nC3bd!{|0s`?>dHIIdOT>`Iu0DIV>W!h7^oE*-sN@I3 z$+fn73O<=nI_nmi(MT>J24S<68D$;_zjfx^xpQOdN{Pxomn|o17HDZ{E9-*Jk8da` zDMg5&tH@Y<~`XWPDOmkRTlZL^I%58zMxH+1c3% zIbkyT^wU&Z`uqL%k<-;=yKHZ7OOX=!R_;L%Xkban=Og>8N|MIh;(y?s3AO%sr@!br z1C$M|Jv}e}%nv3k(=Mg|RROX7^Ah_TO*2N@oqrGA{+?3Pk&zKXkc5a0DDDIY2U#(= zI@0tT8MkwybUY(MpTOWXnd-DVG#Ad0&9tQ7Yj=(86+3u`$ z5;^XiEiY9?jbrjc2On3WmTS)--mdH_$My_DR~lK*o+W+K@uf82q-?55zwq1OU}D4{ zR2~0QgI|1GKi8m9JQN27g@r_2_-_l>gSoWcX$kW2i6^$RUAe+^@3Xw?tyn2b%k)06 zHX?`r@1(IQn~9TKvpS6ozcsn&ea&VP3c2;|UH8o@cWSTv*$Wq>uU{tvLhg==iWjdz zQ%$z9gPmRJqf0s~6OCstUaW+w{*I>RD{%8%b_Q{jP5* z5~v^aPwUX;)>b^@_OHbi+xnRMNyh~yAu|LZ8eo_|%TE0DPEub}-4?Q4{OIlDLsL{# z#3m%9`bbZY0Bo{*p9(BZC#Jjr3UYXK^yKH8q3l_vgC;hl zpoV~|5b*ZxSlSjx<@xblPG>dsHB5bFf4)t%EpoJ12Z!Cdrl3#_7hWZNWEZx-h|ryO_qywaNf zEI=3)to8pa7oyMPTh{`|W(m@hfA^-pa0j~WEC?z?BGGgg)!gAK{dHu1!oE-M3PM_d zEH~W?Q>Rn+4ZAh(YH;sgT_yDC4?0kfH(G9kX7jLd@LP884^~`G& zyL|cRF@dKgBbC0x3k&`bn7S%aKyTVkgcuO?I2IPFz}mek2jdEC7MQ<&{YuTmMB!HI zvbky7*5010Sp45CWtOjjvZFF$vJ-T+5)J!hX=w>`EYbr9%7Dde1*#75^Zso|$bbr@ z4+sdboea03W@LN;YX}sKa~%pdo&G&VCe)d_q_5e4^3#t=7`}l2dwKLh%27py{?MUA za5tbCJrxw|L_O>LIRQ0CZZ*qayK%#<`)L(W8T%1k|CSRING$38S=&UOV@5!M&+1-s z%(Q^#>A{7fZBFO4M6F05*?^IOPYUjP2Q1P0x^);5#hg;Gd;rQFrDjURix)3$-@oqz z(pY3s5i=x)shOF3;i!L4ys>{zyrOrtVjv|xK=FrF+E`iXeE)otKvtYPfBp{WO2DG$ z7RJQvBM7mdZ-u{rLU^Q{3Mmbd3j|FnaXT@niw!3cZj?c&hg0M!`kJZlu-Q<_GdtNc#<&kw`g(NojNKe<|=|3 zcrjXHPW7A<2yf5;$U__1DfYX?B_zD*HT|O~Tjqz@gz!GElI&gDR@K?o=It6La|j}i 
zX~B{wa8?#mCpkE-V>hdj?*@x^)_Wfg6xdn7SPqN;0b{X!gRAfqRH36S&1SpB-}Gx^ zqvgR|%UFnj=E5?-ZUFnsdd?$l7` z&?7fXsX$SLE$#~_B`yJd*D1n&nj_76PHk&vYh}{oRxB-0EIdfou$1ESgZKao8>g^w z(@ZtOAC4S3vYZJRp?k;iG&}o?pz|tq3yBE{?lRFd*(UvrcYbTFtWRU+`Go~4*1Z?#d`DhYs#a6Izohp9+tb`kC1^UZcX4e{Z2*8Lo3j~*4WnP<{(j_?EVIZVu{Ck8tMRO;*3ABCDqkV*^UtS43v(tXJ7?Aq2oR<7JK?63KzNv3s%8n+Lauk|LgKm#)nNiQEU7rQ z2!CGri;HrihjL=&SG{_sp8oqR23y$~83f*Ie)2pJB*T_&s0mh$U1mPje)MivBiXUEIfG8V9eYR)#;B@Y|@lk@DR+1WE`0)Y>o^~BVaP;de(2*n){ z0su_HXnTi3yqH@t5}rU+4;7wD5ITKrqiq9u;Q|G$BzOR?XR@|-vuXJ+8sZbF<0V2l z>@XaQ%gd6|((WT&zm@&^VT6ot<#Q^q0}}*^J9lU_9p?BEF2TX{K(`2(4!r#K?XG&E z{gLtUamcUP3%8?7LGLmfDj~|q%&g63P4q(xTb%>R>QQ8}a20h}FVRnr1Y!)h z-GffBsUsmq>Zc#w+9F^EFR)wxIUqN-8_BtfHrcBDS^V#Vvu>jpFB;*rem{3E7P-TL z!C+nk=LQTHO#2WdU9+iYpg`OHhzU*wD*S?G0mA}VVW&6AXxX)a-~-jW(wD2qyRA)e z%ZFHQ2_^`&Rj)O+L7XiMJ0uSCpi2uoLQhx@kZTDw)EA9>ItZjM%!vslQKuH_INEWU z3~Wb>VAm;sbSXYr%gl@$x9Ei|d30&G_6)j$AXWkxZy(Kn@GH>U0K_DW%rQZeit{N2 z&w)VECWtTtbx5Wdg2e{Z>&1ys)3T=mCwX{i2%4tSWNZe`2!*S9aTf=QYbk_Icd#mr z!j`Wl+yD~4)okrVxH!UJWm%lu0T+%WWe?ot7uZdWQ@A%Epk9EqQZ4Ln8UvNpDBz8? z&0L~YvkyoVf!j_q{Qf!ZmPZw0_Z`fq58AL_sU!GiK&mFhZr~;v4;~i=ZKy2mmhW!$ zHubtzD%n!;lupF@lf=rlSiQdC$Il-$i8W6_NwX=keonr47uKa%(L080%eF*Y1@+$o2D5qhw2o zv;W|sb?Z%dhe5vta$XW<6}T`U(xJjQlfALFrUSA#ghT?Z`i@KM6-;KAmFj=fhxKCT zuVye22LRo&64+}q*W>Z^ z>m?=;YYMPV0l^^-iM4N_yW0n#SOa+CV2}tUQn-i>)BcMfcKG{KefaRf?8mp$41O|| zI8gan-NkR-q(q3560z=uYFhn++2hA&30sgLd|$ZGkbIK^{|_2qhyI^9Bikzy*EF3d z9bW`&_Q*W;ZI#N(&hC1!SM??W>;$Mq_MnMFV5>||d=gt@P+`Gn6$UetR)Fhme6WCRCLzpv|qT9SL;@sBOvq5h*l_d7s>%hQ{b^e*n954Pg`yE{JH`)oBo{@8g%N(W&m?sb|bAK>g+Swtu|LJ4tT?hYj z?V~c_n+TpfAmO4QErapT0{gG_mLPB7sI|c%1Zh~vd}Ob^y?sk>?~Dnh;{Qa_`E!jh z`t48GZ!B{qCTZ#D@PuG!|HxTx$*7p+XUQl{MgG5;?WJ(g9=>+DzBoEs>QhXNj}Psc z7f`M(E!T62FqOOy3_J;8$;02@|JwEIG@z*_R6DoLq$p}-ZYim#kVm(k1qlUI1Ljcl zpG+?m`47`jGTr>}KYExbym)^zrQTt~`afN~!0_g}|Nm0!81%>P4~C%+xs@WWzkyPf z@z^n09$qW;#@#~M`^uPb*a ztN_txY{SRL=izu=(DF8TSR-2>R{&Y3=ok4}%^^!r| zC9t=@$9hM{#z6AFQ1Yz)spJ(hKv_l=7Z0{`2*vOrZDx=? 
zOLD(^%BC!BKS=%0nLG$QE&@ukah|Y{Ki!W|xh!_Jw45k5dHU2HY7)RuV^2U922Kzj zPyl87S}CKU8dSYN2gSxoA)zjv3m}^{2Vn>zCHOh>34yfpmsv;#{jaiL(*F(FubrW3 zn%D5#DKO8ExKTlO>ib6hEe`q(U1y`2lz$MpH&S@ik$NMSt;l_)v3YdaoE04Q$FI# z{rB?h`Rh=nr4c|DmD%m@pmxvp=c~B8yOScynHer0I)3~(f%}sW6S`ATUF{pJ<=j1t zs0s^X0+Npl#C&NvxfigiYz)D$5+4^wt#D<@ouE4cCr+DhJ$DFZYQq@rM@vKF;!Bz6 z*Qc&S)t<|Q?8E#?A2WF zdX~W6?do-20tP?L1c@5x;bj}}myG=7iu%LJ{z12?nWU>;n3|ujdcKc*>_H2dGFaWg ztWAUaFwY1Lp8!9XGmo5C z23ZSC1`oTx>M=LA?s`h*lbat`|3^#1zcFOiM4-|wUz{RFsXJjKii`27#t~D4$JLUQ zos+)*VCo@38j6z+n-K&$FlfUAywJt6miN;Cbc_5B>;c<^uB5|<-T9w%3;`*unr(8t zuD+fK0RRdn0@z!|taP}IFi{6(-09vU;Eq6mwI1QpsBdXUVenZ#WKZ}lg4Kj@pPT%A zd^IV@jvX5wANTO~Cf!m~Rb_Z7a}pj9x{iYdY-4@h^H%J!bsFFDq@S*mIt)e7-^>tB z)}aJV_$&v9gry}fi0x(*4M(@K%)t`~@$KnA!BOz|=Gd>DgX;tvGiP;*RxvY_RNdX( zj~_qQ+}1Vx?FA@-=BPBQ8D%9Og6R;r@B>N8zTmI@7#m9hmXi4n1cD$4{DV+JpFe+I z3d~JlfdR)ne8)i~5=mG)|BT{&6HotUjOh8jFPr~2YjZBLRHtUArl<`K4IhG4g&?>U zV3+F;l5kIqjd=swCrEXm<4Zd@Pm>!kTD!%0W5?ilmxJk93) zWrCP;9X9&|+XwLe2ZzQVfjsKC?NZSxKRsq?>1g(l$uer1R($h7c=c7KEIcuuf`Vcy zmD2kF!#Va7Q8}#7dufM?-sby|$9J=t5h^$(~s>~-@W6dP)*}gvdPOl9*lAM>8?vjSDzV*cxzs;Fz z3NQ0_IfKvi_1lNYL9GQXs?mSnPdT)-voRGBoHHUVdA*fN=y7Xuo#nGv`&ECecS&dzSmf z?@BAC@hP%;2t&lp$W1Y><}jZpd^Hd^Z9emC4++q@tq;pPmXp^yZ4ORubDYl zkjAa7=#0-%rk}&MSiKjbnSavB#(#lg;~+bY-gX zdl9PE^AvhB9W@Ga-$FbG<}O|&Le!2{k4ijbn=ZthQ&&lPF3(vV6MxUcDk<+-Y4n5a z+sb$M%$W4Q-mYdrd}`O)622G}b%Ix*D(JH1$0C$gmyIvrSOz}QNm>8>@$HM7^CAU# z&*GIa565?J`z6o%a^g2_Nl}B3t=jj5sBB!)$|w-xdW`Se1h1wC2s{>6Cw?36dWruL zBY)f%3dQ~E{py|#;hq~BZ{M*OC{Iq5)ih90HHQU+37)jHjN7xvtgMrM!~Zxj;>r~( zx#jgsxl9*fkUEju6hG?Ui4Nda4aa7?qcb&s^ylwiv0BI;91^m-#Ca2)%*&bOwB&c} zY>q9>K6)<6?yQSU?D8cs2h8({h|LyCh;AElFj6p_pkTiiU%YJ9W@q=hc3jOoB#npg zvi3!@zMVll9I!xlLA?IRc{95S`P#qSVxSmNG|fi2WVI9xp@S= ze9HD(M;JMfLyQ%8>Rp^=nxZUrsUt-; z`r`@c&GSil>QdH9W9@|bINN$MX`FKZ!HPm+60?PkK^;MD@yu^=HD3(x)g&b?eJn#X zxCNfk`Jxy#keG6y-YCKG=T?M&G}Ygb&_22(uK#dTa{sxL1k(P~CyH45ppnhlOQ)L7 zN=-DkH^rX)_W1SkeEwrxtL!4EvfG%o+lA{N#^xG7@DU*n59aMdJO+0lDw7ds#DAeD 
z>qv)X*rlsanuvI88iD0eQS++cU9-aoS7mDm?5!w?|-p}72 ze*4J4EpGWcN5#Yygv%^`8<~@==3#GQqBKx+_97*f0=g>3@O#T?4zdk@7WxV9)MMNT z<)R|nq#TBlQ9I^q(eVML?%(olh!E`Of>Py8bcqpXYinE8I&W|N3ZyR!u?f#Y-2&8` zwaW&JbN$Av5Vg0>wtleC+z)*5?K>;4faje;`=}YQD-Lw0ySr1538nV@co+0`X#Mj| zv#={$KWrr3kDXVEs(f=AfzXXPBp_-(giX~Eni%)c)zlP1Ito3_vvR{OoRLtJy`#q1 zwy{wD)yY98M9=GB%h+V}V%?D>tIE1qGcyQ85~gZQleWfHM|Zcd{_yE$^xoNHMoyk72Eiu0)@p}it{zW=~&+pSB>BO}`1 z(_=d=IdgKMhDPTEk?oZqk6?Gs?x%Ro_rArV@&F&h z6;<28tp!=Fh^_5-YL$s*ue7^l&Bnba0&C3-strQN-3!*fGu5C1MK?0gG%G-Cm+tg^ z@j_Buyo9%=)%R+=q9l8u1bZOs>C-XE>E#i;pi@i%m#AUjwi^Z3nOZKtZz)Ehp7v%@ z0wZ+$T+nU&j&0_di&{lY;lfr4?G%j589yBC?W1fK2CUxUR?a~WD|E73h5Mm001Jy( z1x8M<{)k1Feg94itdkVb){CGKMt}Jt?;RWz#0sYaoC`W2ARzFbM|VFT=(nZB{M?5KxK5uAfFt{tQd+VBf78>MmXu&J6h8-d!Bb#c8 zBpzF?OElzT`~v0ijdvH~*W{<~hOcgKEck<9-+3SNGA&_yty7B!dV`=tb{OPl5Y1eD ze8|B3Lxmvpl@{SJPo!mKr8t9i$7%x1%*;wabje&#huUxI!aLa8>|VFw~wDgZK~ER|*|rI=Z^VP&$0kD%QM< zL4d4G46eWTAt9{56A(11he$j&-Rx*_axW9Kf^Zx6@D;50JW0*SkOsp@Mi%{zVo=jz zPTVsy`7d0&NNAckeE2ZwQnE(=2;7q_I2&|~js3)&Fwmm(z#A;)(0mgOTu7gGi{ zckmu+>5kJsK}qeL zjC(Rl;foLqCWF9(l-JadcBW|2XQ?E%KW_O*j(R#7jFm=n%JW-*$lpA45vbtF2(T1L0Q-PGXSy5mc4(<-?m$Him3u2#d^p(Ho^xvC zNyCbI2Igqc*rQ*7c}7w$=8c7=W#&6DMc;e$=+mMz(2ir%)8`7PC@Et=RS#sd8`*L|{?)bg+uUTATjd46; z-2K&~ufP9v3YW~6ok|P|%^e^IgEndt)L;TK4Zq7QW0WqU9z8k*mX%hB`!(I&Atrq} ztQDU!;PfukEg%q&jDRJS4+!PjA1ZI(ypbe~7PJ7evE@1M2orsTK+zIvT6A5?lOl3u zvY8f<2b>5EO_FNba|ps<3If7%uM43cYL*v?MDM3y5SNjW0fk!H39}jr=9JjF-_cNH zxNfh9q$^l$N5HSlaM|LN37mQ*VBAvzNdel#+ZmlyiySY5ZOKYAO}hqoBA|=?K`1^6 zK>+&0zvV`Bee(4DX4;+*wAj<%e;j%s31`@hT@f(+CT(l1qvHx%|Mee8$4zP=L>E_A z55LOYzj6^6Gs5F-TRHLMR|4D-|`MAlJOEABP9S72DKQ zb))Uh&(LNKyqC1UPILI^Fh;FuFJYC8*Fz)|qQ!grKH_rQ6L5H>j3 zZDX!xp(Ts<%Lnx04xVyO#!^$PPQlvKqDY-028H1ff;t35!&{F)e+>2@y zn%QijGcX|xxf`=^WbUPVMMOkYA3Pu&gm7HMS{l4Y=F@E)@SD+QB>u1f^mz$GKcB~s z@3AU9-GS*XDq)7lZLuf0FJ7bsm%22Zf)6cMECb8G0+&)Wa z85tWJn<~(#xxL-f+e-*G;H~J&x8VY>p%>h^3dasOQCDX=sR+mh4zxIXAv-;tcyUwt z6&%2!3s9p>{a{vB7Qti#ZQ{ctBYU8ANryTLEJd61`3nS1A2>v58JRNhM8bg!s(H^i 
zigwl?^g;HJvwQS2u*{;xH^;5PHb&^Sf$4@gP64|P^bZ+O0{6oJ9~K}023Y(b*XY+R zl$Dj$P(L7p>Bm?BSOJ%rFNlL9kQJ)njD>`^Fqp(8;Dd7Sgd=^p1q3|P(=CI(*?w3B zLxT2`CvbdWGkHDqv}zk0Ct2;2s@#Ovo(QfV9vj;W5%+A69B6Y&=LxZb4(vJ}K0eoj zSZKED&Zm?>x=3_{{`bS0or+r(Q|Gl{BEdjK1H~*Hl7r}}exdm4)hjbgOSjO_V_{)o z?&EiRwB+#I{QUb6h5owy73Jl`a7;<-WyHOpWdnWvX{S?B@JiulQ!bF(KnmCc_FLi; zg-&kIpHm+{?g#Nh2aWg$fw&LCg*ovw$;qKX|F!&mXNjdhD2+cg95cFCZ(*?igre zZA~~(g@%EFl!SytJ?|Nr+kQ$(a3t&K>+c6z3@--J(Hp$)(|z{=zYq?qI7~+edG5w$ znI=RTt4+Y3gcG3vYK%ggkXmr1FU<`W1R+Pk;R>KGfm?_IQm`9v!ee|1fzDv70^x&j zkQE#=q66m`L1$KnngMw8HUYPqXzAlibxr_PBCu*ze~=`z60G1by*$tz0!?~EkhTvo z`>Fx~4*Q|de(lnGWwe+RxBcp4d0+;OGSw`$0n^UFB~)Y?_i{rVs?{TQ0o(FPKo0~X z2^=6qpuv-f0>Hzk3tgAqV1NL)OgPd2l)s}Tqoonc5+?)&1$A7~01p5nl!S?B`F!&j zjK=6#ZJ?0V^x^+O+?#+y-M4?kqq@?jl1gb2icpDYkr0wlBq3p<&62dxB4d;#k``-} zCR=ust5A|Dg|ym*B2<=1$Ue->`}wNt`rp@m-_QFV&v87@@&1qd_+R&Rml^Y$-|u^# z=Vv?Bhdz%-cQX-p+U{2}zgl7b?1BkX@PctT8dYKCgdr-Ue8N-|`~-ZSj%Y85b?YMB z2Y+nG2=Jw)MH_NdOOZF|`~u)Z)J9D@;NoJ0{yGNSRyqw*Wj_+8aFTD=KG+5$pqAFl z`udx*r~L89#EB*_G|q%zeHkT;_bKmei2ny8yXj$ec;d`-5@cQSa`!G zDJFkK7rFB6nKP!AHtGWVAK}k;;%0Jy1zl%pL;hDNJ6;wRCg^{aTe-5T_O$#rYFt?< z9>bS)bx~<)Y2+iuo29z_Fshkw8BqiYo4qGq4+;{h{qUg_{i<)tg5PjO-2wUxY5yzh zld2paK76u5o%n_2X=7J;kMNkYW*>+Oy_wZ$ST#W_252*+g@H2R6IL8 zCvMSmewCpQyMhg@dr-b_Ad zvzYX_+83u+4Xu>^)zf7jHuxDfIgMj8ATL6~3y{w@1NthA2=Kc5KO0{8LBjx#1hK|} zUqz-=JSNy*GkWU8#s50~>d{v%_;oXqS3a}){IA&-zwGWlEg|W?@3kVY!&iz}E8tVZ z75{$8R)x8kz#Gk;2{cHaV5Y{Xaf^Tgtu;yfmVa zYJf3)OR8D=9pmMno0~775Kqe78K*m$(esrLbGO2V#W?1W`!$D*cBfA1o;maI7!N~z z%7b)6X>YBVQdL34qT5#*wrV^6J+7Y(A5OmhuhaUImyWUg%5bQp7Xxr8fDeL9wzyA8 ziV}rziwK4p?W-`toIr{_QkOHbXwW?^T(rp6(eV;G?NK(BpWpI*gxSO ztX+CnQcdO1b;NE9k%lkmInGO5zHWr^6})>l9e)1mx*DQ|4ALDh6(`y;&KdEp?&r#I zy4-=r!(-@}ZEZ+|qD$?ce;hq}v|t4BT85EIe+&!^;NxTF&Zs<3D)g*b7myI}yr^g* zS`t;p2BtiXf+VZ+h!p~QLIMooPnnv@p!4?3J6`EF|1dPeXwUs>Y|aUhT3wxfvwFa; zU3BhT!u{RL&h|@4SO$45vp)jU@c%AcP;(c@Rt%lbTfJtDcTUbal+5wCb0us({S2(? 
z3V}Ym421+nXd`9ruR&jrU7To{u8R`Wjg*i|JuWdr!z_UhX`E~dJD?5%W-Pm6 znSBrW^E)&!W&<)R2vhya?r*Qy>huJN&;IVu&yuLDe6x|xh9?U$GBUamR2DdQB}s$8 zS4(0rFck!|KO;8l{Nu+PCkl!AU|wsc^GC!fBpnT}QH<%S^vSu?4_q1-6VDSsg@i0F zI#A+eBi9SzhvB!5Zck#jkMX0syIT_v-fGX$Au;Y~3-2{IXYB~{L(j4S?t;}H`9fb=n37kmT7?wN^m{yv z4^7{{UlyFPSnm~e1C2{B30^j7>eS~fKG%hq3QO;7?(XlevP?vEfZ<~nPc}@yqz5<9 zejNL4m^@LBe1VgU;_jzV9$95%S>w$~UJTNjY>b@j>!HX)DJBB#ZEW_*{{(bC^fqYB zTcgvpui(-gdEA}j9$9r2P7MQ;wHOfwPhD7CKlBIavODq&kHgk(ynHs?MTO|=|+-azb z&JRTRp9EsJpph=EJBmw56f>9!Voc?-Jl~L@69~AWc-lBiM8x*sL0`rFMS5fHTROjP z7Q{Bl?n2TMna`fW2l1e}YNf`$ho175&095&|D^@^2kcSS2Ww0K9$93)Sbiu5pmN77 zwG27}#~PvTA#cy3%kxRQM5-G3)V{vk8f?&BEM$1`c!!htq+!VdtC8;M+x(DHl} z4JwQee?Ws+miO%!Q1i4!d<}|sIj83v%sLo2nu-nKF2>Z}Q+t@Pn{!=&0*^2n$1SiR zp`$0kMYxg8mBn{I<3^0QI1f;hizC$iIINga;CSMVw82HhZ;2QJScAvo1TMxgY3tyy zCtI3z)|D&Q8cZ{*aN8A<1BXpe@Cr z0{k8*RTbRljg8CZANfO#1@q9kC66Kl#M zJn`$=V<;#XLA`+~A8K&5^nTo(C5?@&a!-hP+M`c52shA&zeZFh7>Jg%*VopTel+B%^(Zs_H{o3C@V-UUZ-!3ciZ z%ASQVk)Pe&OC)1qW~>*Dxw^yPwY} zP{rgYdGh2*$GQiy+i$By;wiY3MQ#G}kulQCtyr^NuL`HdX$$RKi)z!si&iZn@on{ ziiXM^;fD?{uSvLKcpImQHh7t2EEH0u*Ce$4{gt3TnE~>*47Fyf$;?F!>^r$ zXdF{3s})hyf!`IEoGQGecb?d=|JEZi(9J{kD}p;N4LmUj}1F?`Vbg-?fP|Lfjh8( z86)&1Bjqk2cvCsXY=HWA5eMlLwbof5abk&pQg?Hr@4Ux@o($M5xWfH5M{vKLt4$=y zeIogSb#=M=akKCyI%t6Ea$K$giC(Dcs@Ljn7a}I6&jQL_;B@-*X>cq}c-Vq_Kg4en zY0C2T-I8#9C+&X^K#Th?09sblKLNBVonZlP|K1-MjBc!RSqw}B^Nv+Q-&Pb99DP;@ z|AfWv!=saj6Ig2g^Hp7!e>t%M|Cu;raoQYMYZhWM6bjS-1C2Q9Pf0{(KozsHMPz0d zy$YzNK~t7~Lb-nZn>W)aXrdn2m>(=JL4_5ALi+|xhU%l;+Lsq^vh)vHv~ez?M+%-? 
z8$WT{%pb)Q7gNJZ@urnHhR~a$D@g816`7W$VSKvS#qB*+Wn1G|3E#n&HQ(DmlJ<^n zmohSJ?_c>o0w5U@Ew*)eq64_NuHnphagS~R_rA86IlDkhe7GmEaN$CqdIqiyD!U&D z9^hR1U7madhZCqCiNi&hyGaU39wwUIa#DlT;~(ixOuktTx38a`JdBv7rvQyeZ|cEm z2SK7?&1H>F2_u6!+L^ZUgsH$Mh(wey2v1wyccH$JoG(nyBv-x%VQrH~W(JPGT0ew4 zlhit-umuvAp~-xbCx^tIlAQIXl}S6LPk^pQVD9i^xL+(>F&PEymaUiG)3N+yqa7ZFA`eXvvH19}?+!$auGAl9V z&5qG82ju%+95k_O5B!?kJgw&|Q~oXlaS~Q`7CdHiLbTt{U*yD|vwt2v-w`H0lSu_o z4B$mVMrLd4*F3-SoSdAkNWDZ$PNIe|r_aFX#59RSz_KqAp^2#%cEi$3LSo93R$4yL z)YqTlKS={(_pC#+s&}(;)oL}S^|LzMe`YV0JsWB{onl{|HXjYqMLY^k$V#8}&2af) zuwEqQpwXjO2>6H}p0gNGWlvj%XCYU03UEX4TunIg(t4gv(C(a}#wd2#{f?@#`PW?% z!0TPX&RP5WCehpOkxys}5Kv?UAorl{qv+^GUd0idRbWlgsLcJ*5B)GPn84~TBq}P3 zm!IrC6OVKuk{T{zo*@%0{K$YMyL)?|BjLLT7$ONT3+^rEnlBt|tYfqM>WsdR42H1V z>UON)LrqFNfymOUUg8}(I^V=|V+GN4uQ-XhYtNp@YLPv1jzAKS6qS6Mk=qO)8xUS0 zD%ia+$DC$ zckSABCIy;{!`CU`NTZ@;NIJD&PmJ!alG|B)bBTR!XQRJO$*| z0n~?sAotZ8F7iYR-o6#W@MJlRnOO^T07-#F(DfJoHcs7E^kIt`RRE@7F%%e zVDz4smx%oK`EXqUB}6)8F`_TKd*5rcAk&Rt!h~@{-EX@6J?ajV`?{*CXYZ$8^G9_@ z=`>6P+^vSrRbBU0`2op4TH`E6^oKp(zj;$RtXBlmY}#VAc{tleZ$CB3Kn~0?H@7Va zyt?-tCUpC<*gwEfl!0djZ7`K~8n7qD3&x5TM9XVp3P=dBf~jq@j^*I8z$V__O&{s0 z#l|DshI#2szCj5~H|`l&lGWh3N$-q@D&6&#|01f_51tDc6U;G9gje|U^5touv6USw z^$vRid!Bat@rz~qxB$`*u2_$1dTa(*?Q`2R`)r#fyR~ZSpLf#0eJ%nu7h2WiiIZp&#Ap>Z%;ALBEYQ zTo><{_TYBR29ar>PD+RKP+>|63w`mt>H}!%Lo`h}z8v3&42F2*&6^eQt81bqlET-pZ|5gupq$4L(!n3`~ObPzd()~y7rA6m?v``va~TI%CR z9)KK>lvJhj)KrdqxCV?uK0TF51FQahXsOai{;T=9iK;+EQ3H{uzH9U5^VORI{OS~> zrTu`#Bhw5+%pE`u;5$L>$bTA4Qn047j`txXYTGS4P`gT+eEf0vzVQARGM~b#R z_}O@q;SS4AU{u0}>#!d2qj46gU}w+92D44PHn z&@iq1)SOR`PF}>5*ZyR%VGOrM9KqYjH5dI;sx|tmx_U>s5l==ROcX&RwLUnY%Eu=V zRNVjL)6FACj=U@`z6dpX){aByb7Rr=U3>IsLv4M11*9Ev+uzx>J=a^oUA)QN7=fMG zUl0Q1_~}zkoJ6ylar#cUQ_jMJHS79NiOmYk10GSUZO84gupr|IesA}o?wFZ`RK|0$ z3&f~ijIl9XAZ?%$wXD+3DqyqM!O0nKygUgti~zL~tHKGjX~NghK^%_^0DLgc?krRF zATa(N^*IVaxjR75zrDTbW-baWR2r#`GDdenOvIr^LR8&WQj$xT@8IvNnoiXRZC zIX?Du8X1+jo6ETRwJ(cV`CinTddk4S!0oPeCRbTZ2wX!T^cf|uUR@$gfy$8i^^9je 
zsJi7~Azv01`Qb~l|NQiP!IvPF^CJo}pJefZ$qOoK$2~6u2)DtQ#rCr2gP1im46&#+BcJiI%ivwyV$Ux%027 z-ssMw@eeAjvt1qY;6bPj3SN3`86zP=f(AmvGShC0(6^34#BJp1OFggZN$aR-Y-FI< zcy4KwnCqD9xu?^;;VM;~zD+92^ASDY>LEXW;q4mUNM253?`;jUH8!DLtnb5;Z^DTU zhDeyeCo|vfmFwdqCi0btUhZaSC!><`uZ!o69Bz5` zTQUc7VE1GVozVY*{|me){~{akKc&w42Wil~Z``>ivdI9C4gF}#@4q(iXG{D3+Np;@ zHIfkKoSeav^AG>}v&@J^)W~0QJ-kt-T+_e4ZNM~&@J{PiZ$)nsV~tHb(U}N2EO_%K zZAS}C?BT{!eo-g$&TSxXECKF&2^f6JJxI<-n-77N0A;MxFX!96faEj?}FC9>p8p#RJb>KPRi4UhQ)=2g#h)+>oPL_{uD+rCs)UyK{ZaT z0{>qANVs@|O~movzrudEKyjfGoAux&-n-rUeX(p#sU3y@@%O)__~38+e*b^}sFP<% z|1C>Q)BgwC^8bFh|K-&MMgD$q)e|JOMZoo?Z zef|IX4q^|MZV=dyy%&_H&z@~=)m_-SzIemC<5y1JYWe%WjriOkt;$)n(8g>Ji6jLuJ?g`ivHf zU`uw-XyJ6<`q!J-Q+erMAE7A4=O33r?Z*G=_Wb8}W|Q>H2XX~s9gAf{%L#qq=lNgS z3S&*>Btl9ci@g>cY*JHetMCx6uBXMti5d5M3c!W^CdjhB9aQ+G{HZu}Y_%=+O|?}f zSKNPP5R}l>-W~v*6-1ctQ&HToS)j^o?Ty7Cvt~^^_ln=y$fEu?QE1Ri7MOKl%zP1Aq+S&og`spBon3`>i$Lo%ewa z!f{sRW;S+r@f5#s#-XK0X(&P&yM55paVZzzY8;3Pw{Z3SMouV0*wJyfJ0D0Vhl8HxlFB}XB&4EpCuP(&fau?S5!q2kbfwnj$8t%45jzw$lfhzJ4F ze3z!UtLj>i3lUHN0s#BY=f?Pr&2j1~9{a3GNGYK0DeUiKj}=#{?THdedcl%`dFmby z?~$Vuk7~gP#DGuVzTJL?YGOhHe8{5@VIW8ok_uYw`5&~91A8t~(lza3f37E#>@tF=?egVJB^K( zyg+|>2~Czbu(XSin1Xut{{H=lL`IqY%*BhZpqqI9>eUor47o7P5Sfr{Kh+vTY?=o& zYV#EBCIE>LG%_-RprEGhs!AqjO{m7!ty>XqU{==(_~HuK8Pkm}=&Q*$OKR*ve4D=a z@2^>`c4o;6Cv+2do_y%~2`2vOAQqr&&_WXm>4-mOK_auHcmY!SjHEV;yfcc>@u0Ck zO_EcQl#_O<;WZxkJ2(`{mB96t=gv8UfBd_Sbp{1Xtdsx`5?I8RmSq`)U+XiB&H~2V zmeWvw%v*b8p6clzmqCP*WKtHN_jo21OF78R3Nm5=X8L)`_A}86XARgU@tiCXTmz|t zHz+azsAu()HDs?q&x{uFDeh}hCfB*#p1G#dQ$cH(2wKtA&xNqfDEjc&c`Y5?@XQBb zCJ1o2ZyEwc<@P}NN~i^B2Zhj;PZAQUIvd%K5epF6HK<>KVA%`l`TyS|RS1J6k&Q0? 
z#fumFP$~7W&m=5t4p2BgrZ`}RWwGzl=xgCn)Gz-eyq z2QK!^pNLA|?)?0VtWZ1CO{Aw390GDL-oAN5xGLmenr}qLDpIsYp$I4JF@eDV={ppj zJaJ<6{-S9lz@CI`V;vs8Knev9nGWs>ID}WG(UEBe9-3&-G~X^_>G*)VltK_R?teiF zsoH=c@8#rNL<|8GAmHlGATZt0IhDzCssn@zzp*!%DF_6AfA5O_!AdwTTHsgLwPiU1 z$Hdjt)O-Yfsou>-+O|QgC6QpVjSwQQ!7j)`KYWIT*%Fh}oQ8{jekNTFPvkq9u^_XI zL3s&f#3_)mqtstm%4ZPEhsf|l^pWf>y1KzE7CnKBom~|hyYi&+_=s^y#>ES+KpPQ< z`^{|ZmH$N6bps(1Fv0l_V#q#VXQzeo0DZ}}WK$C>t6MYa7{p2rD7}KzX-rXR!pEOa z@IqLJ{jp87zAQ%QPkhEQzz4oVI36*ue@*@w6Bxr0c};~@RcOAFkH3HZ@V5n6;eaUB zpv9Y_5D{jz|Limfba9rOVA!!oh>m3@urfkKw?Qk~2W~6GYL)0uvT_*5~93Gysy)I9-pdW(db{mKxNs22jjbrQMq$GbsL&N23*P2u49ZT$(u2K`d!fW4pl)PdgQ+Wc0`XRHBM7R+VBo|cnD{Yw@ehcgHKn1 zfvg<}P9>D%V&srckKr&U1Na0mxD{FxDX97Im;Gjjl+#~y7`^!m(66EY$$e4jHtg>Y z&?*^kWeTTvLg!eyvZ!j$QzF%rB!y>XxrOO2Lk$Xm5tlI2NW~@@{O|f-NUJD^dZJ+X z)PTuZE-(Kaq{PzJOuSfu)6}Vf`t-=k2p+n9NSwyww6)!XOMqQ7iCHrML^iph>X6hM zfI`@ng=`gZocSV03vtd*5SG}q$=ZZC035mI5g7v_^3=0j%sW%-A)q(i&e5<-d;Q8@_)5+2!jLtpa zuv&m!mRoL3H;aRoj!1qGtWKHq6&=Y`-cZX70t1sL5y!g#MOU|6ye?LO4~?&mgF~{} zO-O$&o$8_Zj{#2yxd3DVNP#WSHT1-QrMF{85q#S_Q6vE#I9fk45h?CU+8K8YD%m4Y zRp1ydL=l@oDVQ@I@x^1xM(}k2WcT5@KLbF5;?Pe-*Z-mKZ57Pqh=km_W!0X2<VagNz`myFTGbOc8o|yDW5G)2%8;66DhIjk(;fG<<{+FnF+T*`a z^)E8L2VaJ0q_qXSzEM~%`g+&!*Kc#E=7vP@s}tW3P@Vl7RA=Q*%VOmJYDEUYu9@&I!A5F8KUUtq4po&z`(K?$jSIx{jy4<0zM1mHdt)t26R zxeuJ{=aT493M57XXFOy%nAM0`p~=Q|@7}!>FMN4~@xUMgd`u8Y?yDhHXjZw-zm&rT zY-?Xg#1#<0ELpbf4;>$D9@@!XXzSu~73c@NHv4g0C7Cl9V-jBV!l`*^ce1HH;A+$i zZ+u%*LlnYmo)J7djSV3Kvf}_VwSP1(ui!s~H zS{;Bh1U~z)*J5|wm9gu;+dAQy4_Z09r(Obw`&J<9ESEi9hGxn;<$>6ABi&w?q5~!xiWO*BxDmn1l9K&oN8kkhF5WCecGNi z>1u}$9MF^wi9ySwn!~i_57S`d+kI<2l#jYCgF}@hjbxEw`Y!7NZgn z`8ISJ*!s0iJ|9*VA^;F8!T~T8$!)%i7cB-o(2zRD1zSym2&aBtHJWl{P)w~4@_G7iv4Qx>&P8ZAz zc6j*F=iT@+3O+KFvrb9r0*X9QBIEXt&N-_lB>Qy%mJ|@gn_vhCdknk6s9U;>jx(nS z)yW%o{m{rr3k(*?*GoiS)8KP?i1cfc(zL?j9tAZD90p{-YoqXsjY6`9`14}WRm8j0 z^yzftoNR{LStSA{K<7bZ)QCKOfy?JR1d@o*9VA>4Sn*Q;-(FznYxEk>^biTj3UE+p zgdmq9(rBNDfC+$zbpt}CoEg=&s@Y{}L+83dgi6*+%u{g;HA_=%y-FHqB2z+PI}U9= 
z*|sK5>l4N^y)9CMurKjE3b7+`^7O92;gLwJ;xi^p7MXwo}aM_QPPAaW`P8E~N zz~c-~AruFN17Hi4;2cc9!2p#Di;JhehGYp4aZ3IexwJv50snpZ=~FazOQvSc z2+}2rYq%H+usVftIZdnjW@#%;{<_g`8?cMCHzwtzVD96^aK;cyd%)Z}Yl6=ODPY#i?3 zkjTjSDvp)@Xvd{q#Aj1+E0#Qe-tuS#^$}5uz!`<7(9B6=3=A>UsBQF~3@i(C;bv3Q z*}7L3bKh)Lw(;vD6Db-9Ta*!%TaP1lVRT1>0SlmI%^utL#+Y|8hGm}VY0iE3YHIsI zCM!y=5ps<({8pgtQ~XZ+g2d!z9Xs9}bcD|e1O5S`G6@A-- z#mnA|HGGcY$8L`Q8_T&ddpG^Z+`s>oSN8A78vp8(jWYt9^#=uc&e;^0|3KDtwC z`O%%5XDd&gXS}c_f5OM+3pM_+da*@IJAz(5IUXpsrepn~3jv?iV<#)xTIo&LBJhW# zNPz#;*gLEJwj?xhxlIY451yE+JpRU2Gt!u0o%n7;S8Z-G^7yOwkwYSFZ_W-^7U(?i zheQ!RIo9rbA*M_~L7H~^%X@sD%y2&=>3h;BY3EU@049`w&cnS!#Bs;4 zcp6?3?XG-m3GjmTsxBM1xE-2E`>K>VpvZ*so$@k9Rb=$Yar6BqICIdl*so-8i3yq> ztf)4)mVE~g{(%f1s9h#eh{K^tv$=LFcJzH2Vqur)!^e-GW1L1+B0@jPXPEr7&O#dt z>(**C4|j9Y4!1C{+--BF8fH@g0;D*NAYyxIA#4Uxm{kOE52K0$50XaSY{x>EUGghe zI#-{`dix9*zw%x+zqGF0wD;k66^mK%@jCN%q4gk*ITFp#ggceGJ8k)6^+A$lE z*L>{6i3?AjY%-M#)AiJD(1_BejU*-aUtG z_#wSeJ$RQ1aQ?b>H7p{_(IFxW#6R^`?b7F=>6KWs=KhYJKLiC^0Fl5k%INMrqavt^ z`iV^%Vz}-Et%B-v^7|a;dTCi6gmSi|y1{-8_V=DA*PWCbM=Z2_L6Xp6rh9>ZR89X@ z*vwR*bqaccc%9_D1GN@{VbT@LY9%8n5RneaV_NBA{0b(F}BBYoYRKy>YTFH$y`wU=VR_FaHDM^V`|7 zxa9|0wJr+_Z}J?yf%8BckjMcChi!`&FAj~0l8Z-PS~&rOTHy~PW==JFXI4!c#F}?- zYXw`alEI~d%wDvdY4u2p;zL%?3SG$r&hr5I+hC?wV}fCSR9L$<9Ivt!N``{$vbe1k z(1uORz!gQ|n@JG^rnA!Go|iw5ehv-6mKJ#h1sQbQ`a5>a4Uz&m2B(*k@pt?%IRM2y zhhnPx2#rS}tynhR@hVXWxzU}u3uevIf?8%`;qK?x21JB+o0^tF-me7_(fRlx$fJlc z1NWvcjVQ>b_+GfJc2+X^9U5Nu!B3|#W(NaE*&eML24`T^z~m`Yi1Kb1S{3}0=`24b1lFhbzOSi?fJ(a^x&urE@>us=mPAdXnat147qVv5<-Kg7 zT3H!V+R%U$WJ5NBdLF$rhf3w(zgO9eI4$`eV=im@vmTvj1uy9MOCwT+_wqeqz zP;`8-uXr*v5mobZGMK{TBvYmb!E!?uuwlx(-oU*b1eL}#FzRICN`ptPe<1@~jUgbl z8y)Hiem!k0wp#i}BTGSyPHS27fQypk2O%HixXCKq+94PbuAl@G3G9bN+;6{^{%mO( z??tpn&}I=IBC$hazx`x}$Ik%WPiPe3CnYmx% zJZ0)sMb|cCVqHN-BWb5~T6a1FV8E2|92hpb=y5ndh-Pkhffn(ESQII@uZRjmI&EX3W>UNkDN>Mz);~ zl?O(0V~LuPq2W?IHgdAEJLMCDW@_>kRAn5{fGhko2?oh_Z;1 zWVhf*i5+&T3%!1QGw}*S7Qr6pMIpKb(Oy9e%X&5WK zlA$ei?``88umnd8W*-`E+j?Lv+oVHOhN>oWG7QNA(QXg4Gs=MWn;wM@Y}*;pzcO2$ 
z|GkBJ^56yBJ}^OwQ1A=iCBWN{2YC`o9<)sx`C_pinZs1Z0D2Io$aq@XOPhJP-3vAN!$uU% zef#coucMIe8s{5_gba862L|0FdXGH87R>U(Ozo@u$m3HWYpKnd240yrXK zQkA#0wU2gu6-zmF4z@U-Q{(!bsLpPY^el(H3^kq+x6_5L4HCZ_+Zz_aOmhRn6~#Zh z5ntd5zH&IUT}0~kFq4Tfk6;ZTgj6gRjjOezaO)`Ct+#4EQauFz&%M-uGt*v9d~cIBPI@gN4(p$LnxX zEFzT@m|#_Q{f7^lF~*-EX}9d9^%RQ2uUo|-#Em_#j$k9;SJE{a@5<-b9nbH5w~=5< z@Em6TT7wo?EjyR~RwGEIXXEkur+j-n?aw0l``;@kVZ^;m$;_9Tj83;<~9gd_{_KxSpca7b$XvB}5ZK7ai> zbu+uP_@y3q<(8U&l-v|HCu4g(=SvU01K9{gfNd_J1n$ROHU!npmR|oS`RiV=S)o0f zFni-hn2M5m(DvO4Ufc7LN@^d?LHMZ+c8NpVFT~hQ;svxL1KZwpDn<~{T3nUg5%=72buctak zlpMepPzb=1+?7EdA=29~Ao;dh$=b?Fr?&=D3_tAXBwN6PTPp6tmWZHAEj(_PS&j*( zshLC0M~-~{{Mie)MOnxs6=+4!G0bSv{8&096kv4oa6JR+$*by|N%$VCYQ=M#&q4JM zw+YD}K~pl00ziBcbgt$$Xa9g#*1hFE?6d@iMN}j-fC6MIF2Y`<+C_e%xkgR~fqjp; zx=x@3QOl2=K3$A&X|s;%jHehI!DpoOh@4-f>guHxqX|J4hLreq;Sf)mnwhPwt(u|| z|6m3MZYcpZuII@8j@*2wifr}5Z>@pKJQMWokN}fcCum0efI+)BSQLsEf-*c295MyIXEg z#k3c}7UXtM}}0PMgNt1{v-0fb=HeQjM`Fiu=#j%c+U&_eG9(IHy; zL_D}AkVfim3UX6w!p6Qhv^?xjtr*jw4p>6kTL64fiu?UGyS2UIe}6VbxQyf%ow_GE zw-I)}B;3hF1Bpq3i2l+ARoymW#mWQP$SKfg%b+tRk~nb4w^LG6ebEiPEH4j+%*5B* zU(BxOqK}U~cbJK`#l+-@T_9=l!XhFnKvEi@en6`;k zJI(DnMbGimb~1Qn>*(=sG$cQsKo&X~6|vwEeT?g@1X`q2X3W z57A{q)qA9TL0q-yRRsc>SiF924)CD@N@`*`A)YXtMBSKk*PYh;OHk9{W%NJFr8i4F zo-8LnItIl|E0FnHk)(L&T5HDvl81E#SZu%#(K&Pe(9mE>`?n3p{uBC8JB`R_(9abg zZST#s!}$&wmgA4r6X*lOwI7%+3hsP;8hz#R)hpUggUZu?`p7Lq9`lZ9F>nh=cV8e6;A|gb+M|V14IelpNdKAvNe&`w0-$%p%`b?BdkZi{5>qyA+i?_#V|`fYys%r3#INB#@Q<)@-_|0d z37%=1^Ew(0;Wo#x_n72`Aa;icI&l6%E)W$@yR3K|>K0(6VVlnq8jI|tB8wp)iTt>J z%;BF_0>YGdK=JVlpi%L4hV%k5rCGPTYVQjI2`cCXY(jJssKEfMfpm~vqn3{liv-aG zG9e9+$ubpV6g0kiDdJ?S;%Xc(XjPsgG6s=A!sVE>msovE>2#V-5@hb}6KL@0-Hxyw z!hkvs7j^)UD#ocqlG|{A6B#}ZaIIYIGr|_TO&ASD0V|OONVc{y*KBYdq2zc9rvW}1 zL{TQG)D`NPys&aU#N4AV>dZmR!X!qr%80eZEY0FPupniNBX-4W><1!xfqZ^U{)36a zR9*_IBIxsqF%+r1GD2^TSe4(38(f9-&8^H2SRmJkA_@`HdKgrR%OS{UPvJNEuC(`c zV(y-o`mg2>UIeL!CZ$QA8CBDcb7`A%vWw@PrV-YLYL>cPr^q?}#6A}55Qa6bO$2iT zS*L~pT^B6pqP;K2P)FU?5+5+&l1P%0mR41;Dg940A5#dR+`C|K2dR5A9LUzM5 
zv2k>$;RDf&<&IvU(ZJqV#f|1PxKG7^AA1~q)*YN}6fab- zQE2(i9$zH;{h;~o?rzei7_l0NG-kAm&r_H^#aH(%iR6Lb5SY|XGc!NLQxMh{qRlzBbY(}QeBkgP%9RYS~x`Zj=e6P*n*RYXN)4!K(!4UBEg{~$Hra&dIvE* z90aq=#bL!zio8rR3JPVaStcA7{+v>ZZ>jG`VqiuO>M~N9E?v1Yw_J3kr4C5Jj9m-s z+SnSILm+Jo;Czvgj751b!?Y6MZ~ya4$pbL$lJ2TZbRiCy%Smhv5s+-|gaP_J@gr+- zaD>C#wr672&PL94C0B}{o8O5K$>IqP?Mw@ z1R+=l=DCsE*q*t+~a2q;WXn|~*~LAnmWUaF*1SSmZeFTD%< zaR?^^Es6vUzpV1UrxeNHwLHHoS7XG^d0u+L3wWY2Q#c_${#y3n!sHh}+2?4fF8myy ze&$t+krzcJ26(T$vlXA`?YFZ_!iP8Av&Z3>%~(lPvt%0n3-;jsPM<&*c*@)0 zU;rL?9GlS6M*==o8dJGIX+(KHs?!&scZ0!4s6;W_sb|JTJAQg>- zd|uW8p)&s4C%pb3y7mKReizdYvQ9xjdZ3_L&t8$#G>hJ{^^rszNft_yx#q)g?PL3z z{E&^^eM?|K5ruj|Yx8CSJZezUrSH4~E95(`zAFBaX?^r{$Jc!M@mW{5WBc91M<>Ab zXJIH8rr*+4=LUYs;Gs^=x1A&9$7p|@leCt^R9an8M{RzD9fV82v;@**gbQ-b?5Uc= zfSC-A#{l3ikf4ydNxoN@B2je6@LBRCkSqzRxnLS?}q#b;e3OJ zTsJ4OnHe}FNzp)+^9`AaiunT5r|-|7Cy2in2^}l(P?L?X@6|`9p?VeJ8(CIt-Z&hy zy>r9f5ho+?s$wG@3;2E(47*OJE5my#H0-)&yiJ$jupZU0@_R&`7@#sd_@E-<`S-iG61)u#K}Yk|l{@&*P1@q7hrBcvmk<4e9xPPm z9(G*$@S})m+qJ7=B7d?<<^U27;!sl@Tg*pXk}(w{@^&J$E>IO^KWhg5aRD?l(i~0_ z6u^6IN$-{R)gn=Gv@!vcxrbW;6(GmTY~cPaYl1PVyTe27X)1f zMmGrE7D1jcgaJW-qQUJG0i_&-V%VhbCk9GEz% z<|SOA)2QNwbu|{fyI!#wp@V`1Qt6Pf7Lo|3e(&F|&u2&RHG7$;!!!;NsT=T}tw1Q@kt3HO-ywY!PNQc;g-yykUN5*s1dtBb<-h?S__jVn z*}M_uZo&6i2W2}kyowQN3OF8;0=S>2EG>)W7pp0_R0YUqZ9&L<2^pjY5xLxRsE{T)@23^sWl`QF#jc5h31k(X2BIxQ zi+m4RzGN~)Lq!fz5`7{n8Zog`#=0-v1@I&(H-wzlPk_aGItEagFJVf&ei}_#Xa5_c z%o>f%K|zXrWBw-80FsLX`?S+7%~?|6)JFErN?wf$-|1XKwZ;->53ycu7UY{Mhu+-^ zfcZ_?{t+26ofa1e%~l`zzNQ1`=(`0g2-5OT{G)T?4nlW8qZf$piAYLX+ESF)I;aP(*_<4o zEk+n1aFak>AP|1no$IF&)trwaH_$ezRy>1xm(d{ve9N52K z8wlpyRaSZ^9!_a>N1qOjbg?R+6)u5E54Tdg0)05}SZ|TvlC^k(2QlT4+!Bn!+tFGp zqZ&3%ysXpQqToiW-o5|5Q6^9L96brkh^0@QIwehU%aw?=@U&*Sn2bg)EUVd1y|Fl3?J{9jM<0sd(v0XXi_1r z>0=O8lR^h5#qr0ndApwX-ao#ni_1nP7M{ddycM_z^Cz#_4No4|8SNxmzVE|Q9&i6A zd(ZHaAJ!;--={JZj(`xBu(SQ+KgBp@ZX60fc(6<~ud6G|BM9su3C zBv0$%sZ*vvuR>;f+eR=d7&xt1p4KKFsM}bECk9HK+x^@gaO5D~XKYk)iDH`{<1q 
zK^mwQiY{Du87-kLiU;gL$`B&2TG)03!Af{vqN9Wgs;RYAMy)a1`DXkOS=dataZpZV zsKw;a1N$pY6(Yxpf&4AzYXsq8@Es;$;fDeeGT3`NIKLBkw`hLnL+T?AZx{o)$A%dG zR0jitf=IN^PH1=UIivA~sNtGwO}YYqY>FVB;<+mWl^_?e9 z{As@s>NRD=RCwDlar&T7<{+0S>n`Z95DJjX;Cs$!kSfvrrjIzW8v$Nnf?T&eL(K01 z!yhN^1!BhKAP#J4VWLSgaTFu)NbdW6hz^h>N)EMcZsB2JWdsSvF+@^00i;67pOS5a zI11#6GBC1$MD`tw=W z*!89T$$|MpNM>-rRJAnXbuy8|*q2F)0Zh2YBh#^4 zLygbZsPX+U+Kc%dcD^a^2^SCa+}}UL=7~N<87dQMKtC&#SC*w7pZ1;;st}gn6Y-}y zd#?tL2ESr+X|B&ZN&6BrJ&ElGo0f8vXOn_9|B zB+1~)V@sa~Fcwq|;n}m7ZlG%ecl{R#3m(NO4*kwlXTOXtvE1s!&>qo1IviR=mi&&P zK~&sL@;dLU?}IT#po;*OZWI9#T$ZyZ&4bp``1rI7dsRl4iUknKeDKg*Un%p&t z7lAy3XaI;38CVnSeQ{Ws2wJ^xsO@H>9eV_ZnGcUn%8A}@+ zXSn1KPb7)|_;7%m>cTem)411vawPk?k5K;Zr0=6N8C-hb&~O{P<;pKtuUwIa<{~Q>CQetIw>@cy9y3Oj@o5DeOESpX z-^Q_DO@PuCLK0m^S9NcPG*aBbr{D|XVA_-7^Nz;R;O~PMQSaJvHKjYxUg_CqY|Gm6wHCHjt;Q#zPIZd^~4YkL7s4= zMooE-Tco72G8A-%PK#7|BYpcYCpPfIXvOJ+T0x)mn_S+6)3Ga&;q-MyO( z!bo~|@Geq_z#2;EOpAbqfx-AgvfT1+#)91{W_*1II%F@zYYBiL<&z#-Z#59(WtU)mqmqJja`AmO!83T1!!nQ)m85htmtfa8cD4jlB8qOgUz9n7xLh;j{7 z1symcI2a;6iT{8Qfj~<==x!vhlCre&VZfOuBl-CXuT=!{0=Iz-N zfa7r6B_k?drHXB!{f1Q(%j65Si(7Zi;8Y*h4pq?o-oK}Vljg)lFDE9*FkD@VLk>$_ zcCG-G80g1r!H!R0nrrz_Tk=hfSwfgLo+bl0>~rVP@vO;>@fLw_YyPKT9a0m#VD9O zOz4HkQb7O)1$tCCY78!IPT^d+nm=q^({KHF8m&bpfsO&Vs8QuB#4$$uS^0RlHf@}- zfRnXbpATV%_3yK+f0-L?tr@w}9lCX*<&pcq-@-lZQzxVRw1TdTs!#OjFp+3VHO6lX z0sk16Wr$i=HkyTB*cPn_4%@^pLl4?v&q+y1F}AU}f_}LDllclyoG~O87jct6PtU`^ z_5X{kg*inLF~R5@ii~tlwUr8Bg$`hPV57ke7I^${J0_nBPLkDg5T~GX7=MPtWgo6` zqt)DXrs+pAIL$6K8H>=RUg6&$WCG9_u||E(rOEd5s8Wp)n7&|*$^Eib;db)lENibC9iJ#j8<|Yr0ByTt8uhZeT?PUnn^4<1m zsKlokVQ^V#B;|3gxzNS{xMJFUCR^S2L zOsV`4ITI09=wRY;7LepkP{UJ(3!7(5o*aW$bMR=*OG?r+@v!R@2epTV?@pm~fawr= zLk0~ua~oRGM4Tws0t2TKuZYRxtKlp5XrceUi!;>(cIhWSU&)i6GK+?cdi$mVU$L_o zqR0RaZ!3aArKG&^ItZ`d@}U*KJND&l2ZW2gU@LOZ^hj>e(XkKxiVhO0-b4_I75M#p zIx(3_;?XCnZJ6~-0a>{=)ITjL*>>Ww6G-VGsN12%VPM+8@f$?Gc(aWBt`!E(RA9DQ z4U_ZH3%=samYPck9SK(O3XE*+h#dg*me>x_b3BBJT#mAXBXYhb(C)$dqQC?-7aVwhc+K*JtAh4uQ1Y;>EW{G!tWC 
zq-JKiV4Y*|xg0;PiFx?&25!6MNCrcC)KY%%jO%B}IYDE4>+v~9{C@^?pu ze96*7=^2GV8<^kU)ky792k=k6=IdmxSiim$)X}sQZFAn?8D{?e#MzPMJtuRB$h5&V zkp#CB$Bx|rnc}diG?`s8K{6J&V+hbw#2T3p@hv`}DK@s>GNg<({+*rMG1u;XEl>1P z{?&{|4mi4Hh0@i$C~wwbag}w8L;{(4dAAy9gVq>bq;X#mI11%%+od3XVs^qjx&u!F zc)jJII$9E|`nYT6h>IJ6)2qyO-Yd5L*b?~GQ@Cr4;&~NT*cMUdevgK9SFCrEiZs;4 z$Wm|$-4ImoidoL};Xtz>Eek_lSIYIq=g+m*-&1^1@4`wi8$`;sR&MBmJKG!>8T3=^ z`}qmDVts{ltY)7wY0@1)3?T>!#({6tvJdO0{|#6>@yYJ1!F)R2iI@^_6Aj#tGO0Yx z3oInK_$~m8FAEATz`-KBbg2&e?RdAhGh&MtiDwR{yU+|aL@rtDX~%8B)dEct4*?HR zA%lbsgfmKBR$Z1WgFz<~(o!@T7(r!VvnBC)kO9T~SE$mBWbW%5wp?F8$` zsFUIaRxN2l*aFvx% zTw{W1Iu5Acdut8+{1~=#XkNb380h2P6E>Dpb8aqlU%xOP#iARVhn%MW^)K_*O!~(k z@j_)ph=QiIwdSMksKX{E7nsQ(;^K^N(|@lgEQ*Q-zd>9wfCb1t7K}#7Vr*D=ysDP0 z&Iu{$`%&>*KC@FtC@wyJ0*-wA2OvH{YvBqtHZzm{j=tX?uA~@@AHd0pmlHvf@n$>F z2I7n&8~MWYt1918N*{6+4vrE~k9@YOtJ^wd2&7Ht*3coVpM~3wghS#AM#*REiXeF8 z>C>m*rpN6dZkMOnumgEGaV*?1Nxp}@;S^1rLfA5%iDhYs>LyGPxHrE6aOzO!w|0aA%GS=AZz!-ly|1T%ADk!kW}Umk5W>mz$NU3U=$QC z65WpPYf}4v5va7=4WCaGe1ZAb0s$q!MWIo}nK~VLM&5!=?d_8Rzkwxsb8CYLhExj9 zeImvHl1B0Bmi2_U%6I@cj!$RZs{5jWeEbZ_)W(ql^chUQ0K_jP(di1F$z-yF;olbe za^j!L=kw0}KdilZSkL+X|Npj(-6$E7Jw&!dSwfMJNE9P$lVYhi>?w#t$rQj)z< zAry@=ma(*&vL{-Y7HwMI@89haX6BqZ-|PDP@jKVKKG*roNbmRS^;{m0`}R-_9WmlN zX}57+e2)-Z#<}UQ`Exo6By!1V_81O>ka)s{2tSa*hU@#*DX(kg8 zxC=~+YFvBKhO+z)Y<7_nB)L}xKQI|C?Iw?XZT_oG3qFeiiRhN3aepF+pwf8-%(ZIQ zH%!uh^U^-T{6Lq-0iCY;vvtds+tK)ny%sYBYymL>*6Cezbn3wGNju7~RcqPOalD&? 
zSiCLMdyRq|fRud=x1~U@{x6Y5YsF)cI_zEgYP4B|Gk+9a7NCe6VYYA z;%$M!A{x2Jw8#gqE^N%&`Eool&?Avo@Vyi$e*bo)(9^FnKBQ0xgC}E$cqA+%Z=WAF z{#lTisshy>3A&VQZ-hjeW$d9*ySR;e=qIZk|2MVB^>O-NgS>24;N^ndQ&&hhw{{@0 zPZH-%`C)@ig>wbmO9H|T1@!--d)V&z8=fIl{M~~XVB=lq`_|@Wopepq`F3i>4DJbf zdTD+W&=pJPX*BN1;iC`m2=*azj9=|qFnQv{Js3bGF5I*npNu13{eAX+S~0y~@c%PX z+?B-RSmXa;1o?3!0d{gfLDQnkBp@V2hUqQ8&V6fuTJ!ZfY83hRBSvhwXnC)U#CZ@7 zt-kZT@?By*>N7;@ z?Em#pi0GsR1MoQrz~T?YKr{7Jt-fgino(^JHT$A05K}^r>86XB5*U=cAocu_{L3ht zu{G2fy4+0>+vUllC}M`dzh=*$Ul+F{9L?q!o%SOqSE7x(4d40d&p&$wu~MHO`=yn( zQe<>EAZ`K36fnMMHjJ8S8i|v80s?mPwvKTmbR6k*6QRwPqAz9!1qdDII=Y?%w64&_VB51SoaBwxsHOZSMNYb>gwE-p0Ar0`j7fpQAe|7Xh1-F z?ahU?HX;Z}nFV;i9fgEsW}A8Nr~{*-()N_>Xu8wR$-iG?b(JU{q0xV!!j1<@^Vp!1 z1c-j?U;X32jUlCD27vp+zRzB~co${xAFq7sN4VDre3v)BVpwX~_Ljhr@C!1O6Pwvh za;^|GHBD$0s6y@P%Cn7_^CBt z|8|x4BE%w*I4Nxgt;~YP*aTB;G-=|?O4?Uez^{a7ye92IQAV9$SOP;_YhxYNA3`7e z$`%Kyj*SG`F@LCM=oU(rI2TpXixuEY-<$~G2_kwCnggO5RPc_tz8Ggpz_`Wv%d0Eg ztBNO!<^j}a)#6dke|I{|aN9~K`hcr8FAG2h&Tv|7rTCCcu`-I=Wgr=w?-6w~l2RBB zS$|#O9B-e=UteR%V6|Oy=&r&DCX^{R#D4b`Aqzo9HP)`*Gynh17k4|o#0}BY)3me( z{WUo_o4Iv97AfBs00aFBVMGI|#yk7zjqG&*OJzN5|D!gqX zt=+Ie0%A;|yv3Wq#n6Nrpc{!5%Xo*}n{v~9opmQUZ4|I1V>a2BI~c=>grSpeBSafO z)<&Z5Z=_8~XZ(tKix%5pF;2V29gofc&@MfT;82F2fyHeDjGnF}CPp?Ci3=oVhK^fB zVJ*AfcFeKW8e9HnNo;9mwWLoe?j;*6Uof&Zq#Qeyt0mq%wVvJ|RO56E?u_<&|AJ#=86l)#BK_BNwT!)U zr!F67I7uBVwiQnQ^q(Z?mj|A6JLqbywMV7cBv29L_BNM?OeN$U^j~B9VW8maU)j!L z<&K75k7mOz#)oE!rV+ligPvX!y&FYMXNTKm49pqdVKUiJZz7-Twc6CwoQ?Jf!x;ATjU0YJTbU5<3JCE-o(qgS?uLIX<4$Adxc;8g%W} z{rAeRNxGwxyLQo7U-F?oddG8)_?t0EoLS+fb6&r~>R#E9NB)7XP+z#B`(6QP;v_^t-b_M*A}eZT6_u8Q4XmzJ$V)AM5`C4`9m`0rKasWI62+7Fh? 
z4s#J_K}~vW#;nS;F~w3y+1GtO%-6g0h{sK4yU9OI}7(zzidk1m|;dN$5hA z9n(&ckuWP*J#q-6jbQ-Alf4lrOc-}?Q4U^ds3-<%n71--b!yPI`teWj_o zr};<4bUUS%jNC%7z;_ks72{Kfo`VOAtW5&i@MvAcj1^kdKD3g5#ndY~At`VXNtn?= zD1I%s`cGQ$JCqM+!>neM$8bGKYfs&vvuM!Q&ZpK>$~QUnnJzA4DDT~ir2zZ#%p6Yr z-`b(+;l{cjr{S;zVcjCbGDK{Td@%oRmjKOY{pgdATW(F#oA%QHpYK+cmDWw zg%5Ejh#wn)!Mq2W+S-U~T6}zV)yLBlWzbk|pp#lC6hwfKFrPp4HaGW-VNWdJPB{h! zF(}-BKQd_Y+GsMK1O`)=42gerOY3v@O^vn))@G0r*tu!>7Z3Ubw2*I=iFascke}%e zwt{qoi#dJjlv-qRh>9c0Lvy>Q3`=Ey8Qq87{L-QwK3|}b+fG-c>2y!wxQlb(4xV3G z_Uo}i6EvGS&#ztHV~iyLpmKjuR9xC9Gnv|ombLj&5?p;`NJVC9h+AODr`C!YSC^0I z$h-cT<|cRgnlV{lagC>+hJC!}@l_&?Sf-2OW5C%|mwnG$^L)BgIy0CJ987T&EUo~J z1%WONv(b7lxmftAurOcs^xCH@r|v1;D=$o#HMC^*WZ$OE8YTIvHCtA{*_b7zhD-08 z#^^NAc6@u}zO&O@C&&HEr@aW=o*CQI|JnW~hSp2#w-~Q)*-34J`2_We`%Ckhxc}TF zqsRM3eS73od|sQs_Q8Wy4Hx#SxC2+TVRw-(=Y3Uw)~nP6pNFkn+U%=g()X zeGW9qY1H1t#Ln?>REi38LE_9%`brw8TrrZqnJ*zN*iqQOROM{gSm*EUZ|96UEulBS z{b*=8qMqm~K|$Wo&`HrC4iu(m8>#xdal{oTvDZecKXi4GNo?rXkMA~!gqm%S;0E$c zG|UKa0#~%Qj8J|(6D@c4J_L}q4Q(v8(wo2iRy{0-7FohSmKQk9otyls<+&$k4;9wx z>S>-4nHHO)CDPe^jUs<336JDh9Yz)ImumI?`1#_zyu2p#G*n)%0LrO4fP05{xn!%Vz8_I?beTSI+SI9X2HLq4d?$pah_E3Ce&UD* z#%jF(7=AKE=FWM;aW^&wC1-=^x~%5F+8mb$I734abn@is#{DN&B|lVA-_Dvdr#98} zjZaMmU(KS;?XNF$VpD&*le>->kbv=?@QBmQ*xhiUC2JeYT`iNVg z-U#{vzJiCS_|Bef#4xxz+b7dTG1?lCNaQG}Xb@B^+HKxy8BB1a}cltjD z*rh^+mv?n};HC$M`!@lOPW>ER5T5{s?$vr=Q?!ZRNBrhbr92Sl3`Vy8tACtnKK<*? 
z9HR2l8dPhmFEKel&c|3vG7%j-n@{+2F}O z_0j$gr96bhk>PHLA?pIfIDJ?YR)@diCjbEl28$32>g+K8{gz+9?u(~8Gp z&Liiqy$}4txV8W2nv7omqdm*^_=l(Wq{h*IkP`prAM(5FLGvi5DaHs*`t5t)d$;-4 z6M2Qjcdj+mtdonFVV)6TshIU8&oj7<)}U{56*L{Z$ISi5M;zN~X5Vjkw5G%V?;rR- zzU9%-yQ19}xqo1KPaG4t?;G#w_*@cGiG=Vz;%($JRfPza09|;EB^#%YmPFFO1iA4K zA2wV;S{dk$}q-S$=LPy zd=xMP1`fQ1-G=>VF&k>4tDfEkn@U5CPQ=5g8Q+KK$5IU@u(G?^5I%W}akqQ+?1}14 zfW!u`tqW;FJYZ>JaoPfdO2|E&s#a82uOXNHW6RDq2`gx!fGlhPgTW#6iX)=>8w7hj zD&MzHRsKxQn+-2CW#*9cP@>ty?0})5-_rd>R-}S&(j2gK)`?(M;HAjOcqC9-oj;OV z819tZKu1T%9LXMJM`JCO%zyLqOP-@w8_5=EpeLVB2CYj%9nPX&9t-cgv#DWsIsQy; zisEqG&kLGrboL6He}`l(_2$j%fUHEk-dg4+kJdKv@mUxwr&^H9+fSeF=A~t5QT8D_ zAX}zIGj<}jg1*xM0SHYz#qf6kKwH7Ky2rI5o}V&XHXL>vrai|K6Wb}ciNuhS{Z<#e zPn``2^j&ZEpw%DLl!MOvIp3GnA}xf_#kNzXkachF=pI&t2ER9RhZrJ%Gc8REz;eEQ9Q#oeZkoS7P6Mz+Z(`=I!C4DAKCj#&2L z*;Ne~?9F^x1V|E-OYzphj=qd{PSRjqiEiN|Nio7{yf1f@7Yw2l6fkM6;jplDZwjj9 zqR1fO{}!{0AD@}5M{vduh;Dx6E}Rs3V^wZ*8LHRM)wwDLGzzhz4$rHAr5svyAvjXm zd1Mba=VIu9DFVk54fs^a2PSJvV#R`rf?LO=#hZjxj%jHZmo&e^)B8fPtBYr{6|qtU z(iU=e39h}dMlGyropjx;a~`SPxu65=J(9$9px24-2ZU$lQF=)*49}qFBH^BYWyT%% ziI0KtRu2!l?AN?U@!0U>5-2xcJ}Q4}=VhyGJi(2d^TUWFVtk9&7!i8e0An(0)qSo1 zU=4_sr4nmTwRab-H;c;eBel)sMJu0_PXWkHFthw zacLmg5yu-0@6&5*^l-#>&ePj(!V{7JU*tAYXv+{Qx$H%O0tnE_d$&wX^Q;HNEC#q? 
zW=uM29vkdpf{V%icHt7wLc#F#g1&3m{Bfep+v8WSeE+ab@FSTdOhkM20OP`|>IyI$jS|KhUD-2V_p>B%A6h&_XXs8*;uX zo@C$Pe$F2!&zw09#e(m?ecCRBObfyndrbLCDzZ>G<-QQLYa++ZIsNI0rG59lEBIr4 zargc1!+opo#=WPi=8E85$RkGDWEtpJ@7~~GInP9Qj-fVbahH++W9RG`gkrI|2riD1 zPy%gr;O$`6_0+Y_WH(Yqnp>D)8s?IRd~d4kxQLinZBx22#VdBEMZ;&IiP5ebUPqW7 zS4)V&%tVj*B+Ow{hiC8zV(b58+rm116y12taave97*Xkh%>k@&2RzPBt#3`!b?S!U zdoTxLKem`22W}j`cI}mfM?sdBmNgX1uTIZiB|_9HNmRLWXVk}ls2>t*ESTPPtc{Wz zbvNye3;D?sGlE~MH})~VkJunKBW3e@6cJ&E{1WRDC*GF^({NHZpZc&Wlaq!k0!mCxS~%qe-4oLFFWa?cVc z;kTB9!eE~Hv~sKWn>>@=jX4~0f11X&q$22=Edc{kDzgjAB14v>uQhUA)hJ?pxNU`J zL|$TgQn0f4oQbnAaGdYAl9HNXZ7w6fFlTNxd4Lf3$8|1$04dPrY@&ixQ>PWx$qxA~ zqGIF8^3P}*F{GX?$s^)taw~?Ib`T0011Uv1L10^k~yBa+=PmHLD zp~<$mVyhAn8uiu|UW*HQ8KXNF&Z!VK4kup^S^R*t`R%0`>D4waVe! zTQzUaRQadJT4Zmvep#HJbbn1B_Y>c=0M(D&kVg*O7tgBkRc^LndiT_96GLEyZs`a) z{tFVY*&0qo1{$t569(`ET|5kDqONyT(BFuL)3a2>?|?CW2E+0VzQ7LT0o#=I=-#~{ zmTe=IsaY%Mj~#OK3X1XB`$wus@F?+Qj-qid?4W>|Sk@(4L5m z?litC;BMEhUB~}1b6N!>I>boL!csT3xqLIgR^>$0Gup>jnVH`K#M~_ylkE;VM*SGa zeJ?--Io_euTQ9V*dzr!f5%ZRItuDD5gyvo%CS{1ost&oKpt9yKtUq z%U)FZftbp~H;5Qzr&z zPfk)Td{Z~+eT?pxLoLPNOqE?QsOB5AgOc8u<}Cr^P~U*(jd&`M;g_t-#2rVyE_1a) ztODY3DxNO7G)3P&yOx#PJ){b&fh0N**1|*g;8=TWkmK*hRVvlSK-KMBi zj=T#r1Y$ly!az2Q&edJ^_rEZs*kP38~7?%{dwC z4!-K$Vc@`)RL~Qy#tri_zQe?Yy++af;!1e?!=0)`$Fn-Xd@EfEUXVa{W$^{~*@XO%X4nDtC$Je;W!QNG??+Kd z7fvem_Nz{guda^|UBX>8yT%q08oVT6!@FS*Zi{WSt)gef2fBb^R2B`3Fb*D)!NFWE zl4eIaJZZ`^G*i{MQ$M$`A|Me<3Tz3C1rufbyb1rdS?rJ@L+Wq5XZO45M*D`1b_ClI z1Sr!L0EHy&j-`;uP)y&*fnccAo;&F;?*8$|A;cM5dWRLU%^eehMoZc=l~}X3ZO4HS zryguKV|xVYDm#+h8?S2EyJ@zayWx))W?B*`SbM1LQfDwY1F$+jXQo@3BZL@V{(&}) ztjQ654vqb2IxFYRg&P}Zdvon~?K$hRN-gdS>0NxdMcXxEMTP z{zj`#?}@24U!55JdrdEefN(WQk)x9_z2aNxPR6&y){t%4k*-y1;PRlfxXy+>Z<+_) zsG=AiQL9cP)INi9h;FeAFrG1YlZkHC7>%&9;G`uKfXI~h;8n-ka@SE~^dPqgDTfdA ziEe5*lcs6Om!^pYC*xo7K#10YawQvY*}lRq!OEh6ZmoWbtd#IY+ECx-ONSuoM*fw+ zCh~hTp}%p;Nyi!^!}8v1`Nz)>Ek1KjweVg{hw7X0hWQ-And{$BDJxi@9@nl7+OKCP z=&s>gmpY2;`Pn8`RD#I=r;(dP(;F3b3E_YQbjf@sa_-_EHV{DS>55yY-u2r?7ySx> 
z_btRETAe%UtuKE>!h^DvaPo-!$jDl!{S5#*Y7;w0azP7-fw=UfPhnt)YOo}eNelxQ zqdh>d_mMW=SoY%E<}MoXK?7IiOmvvfUy&rqz*>VE@~br+9{u1qYWNpLuKan&U;F;e zuqWQ_Yj{W)eKawqE3c8F(6Ljepc>1I7KI@YgZQK#f@yS^zs+#pf5SSv0^SF!u4iTT z_XTdzZr}b`*rkF8uk*UZYrTMBdm{d-ha+#SdvX6xP-t%H{he-Zp8^jceffW3KqpU! zhc}}@E^e+ulCx!V*tT&w?U4$jCGc&hPo=v~8ep2V@i3K(R}8`!`n1 zVwe@VK_d}CNif~)*xG<%_@*Ln-7gq?4cryY7>w_+e$UkISlN^W9#E|d9^dsM9&89a zEv%#h-n?CYHQ)CrRq^$zlJv5y6V<|w%Bs?r7b7Aj?Vtvl+x1z@Q7Z{Z0U4LO67~5j zs?##^;r${yS#UK#fR7Bu93?s3>zukW_#OntOq^h1Pev%Dbb!T z%^Atf2sDisT{?9F`3}ifa$+}ASC=%-Ig1yMquwOy`*^b@B$WMrP=7N+GR0#pHfn4J z`x5d%l3+j&$joF6l=O@eaZyi2SzBLG{m>{iwRAHq}nfL>s z@TD<~$V>^suOI=QY*YSOgcxxKgP^VM%W`1^j@}h{rUv%?k%M<{0)vsTU(U0 z3K@jn!G$A>b2r$=IBr3%yZy%%Dy6FyDJV3Aw9HM!;|=1!_~LcPF?MH_j= zgP$~!j8t~gdgQa&yiP3W%tK@}xUY{-$3=D%CJf_xubw2oub*4CHX1b^P{->{>eU;s z<$hF$e|CS*ORF5uWLzqtjxK{K98m)&!asR9W3?Fr&bbmP-i?d{IVhl zGIOBwftPC83;QyT31Ia=iZf^QC z0aj?HsoC{<072xdK0Mv4R8}m}PI%!S>}6*SY-YHn(9yv&V8hvO%ZzU(F+`N_82zSzo4mM=5j+o01ar^GVcy3okY zGrY82Smn-jPwwl12KdFCm#hP%;N*Od0R~J89MLfNt_LWoq$XI9x)g{Q6F?(GBp}Zo zJ+jKK=!n(p5YQ&g$C?9c9|FL52oJ*r8!h|H7K4KeZ4b>WS@=O#WJ%&>_Z@zA7F7gEU}G&Un?5|Tbf<4&^7}bIJuUWm zTHJZBGGpgcB?~_9^Jts4c8!O9oD<7|co#8$c8r?Y8}Bw68Xb*nnl^7f5?kNY%TIjW z*kF$EvauDMxlSo(Tl)F>c6AQ=U${)3r-zuG4CdZz z_SzNIzKHjc+)n!O$9yd%@3j zFPerZq|K1^zEV0<$*MDjN<;k#-@l#$igCw_^|?KncsXG1ym?I(<=oGqyu6uipXXMe zG`!J?h_Aho;R-WDuAe>;sv7Os4n8^9Cp#-^!DdSx^U(q38>%!6U&MtcRW2-~ zTyO%XRz^YcbRpm@`0h<=FI`KW!Ok3}Qc+f;e#1C>n!iB1bm+yp_ibL^@GThg0Dbrf z4%dzf1IL6;>nqFj)zvZY+tuDE$2BeLO0rpUn04U|QSx*$mz{aJn~7r}-D8QB^UFtq z8~Budh1E94>CHo%D_ha~^;RR*shzkIH56%K4%_0LssF_752hD-_N*5>P6ih2OH}Kw zN<3<;Y@{AzTa}zz=HOX=?&{N-VfNp_XFb8Q(Ai^jc8q<-frI6XW8!-S^xL|?#I=0> zjOc*mnhfWfnf{^ZV-p-K_y+j}~l>RCn3{D`&Bc~TQ6PtKqp-S_hy>=-?uo%l!L;zpvxbGD8io<|V zWo%Q=so_JBBFHETg~H#^NBMZ~f3mA-&Pl~4VI`rCD$v4u70AHHJUn2_+iK!m{PJMX zJG+-v0jY&!CcVr|O)>$mA3tHj@uc@C?0idxtogGw?HY=vQKSJP5H(b$$SxM&&KeA zF)Q)Xy$$uqR5pl2PUhQ_kL^ zPOKqrdBsKo8-!g277|^Tx|*7T9o}L34#L%&!lH;!k4+rBx|MU4l}*K|+=W)9ZHB+K 
zI_sWgt@0k$LskB#>iPjYi^}3DZedDC2(jy+Tt7Bw#Z)imC4U)LL8s&!-4Uqj3GH?# z9i2CzeavV3j$EzI9m;&RaA{l*GXT!yVZa^#3N1sHsxWr#t}zG%cGpmx=I zDMF0C<7f%C>z(v{43SfGAg^+A8qg{s&TnsMIO&4CbK5Iut;jjaEvn}WKgb}w8sHvr z+^G66GUs;Z5qjS=E&huf7OYAmc6XaWb*O>;Hs6tBU~2eH!p8K*b~PtHSwgmM-M`;Z zV7yX|h3%+oL^)L)lXju_cWTWJ!AeWUHywM?_DoQyDl0w4E~v0Yvu4NW030eemRI&S z`)pYJh{Nqyga`CEvGXI^ok1MqiMs(6sf2!QEYIkO(v(_6cy{cqA|IkkblHg!(^q$!C_TBxpIuvHWu2CGPp`PF zR?!(d{bEC<=4rPZ;Gx(c$+MaT&NqM~Op#qRc~ND;$>{V8VfX7MEvd*(G>N;X7ojvP zUJ_b7Eg<7kzbM}e=Tqx$d{=T{c~z&(a_g4`BNn-b6^{Ar=6Ek-;f>Qb=9w};XUf#6 z34%xkz*9rr8wnlgz}#JD}$H*M3Vr$=4N zT)cg*)WCq|1I1*G<#|j!JElNhy>jI-8zmd!D4ZxtEmAGMPFU@-{+GuOaj8pau|>l@ z+o!-DGYdjSAR+nilvHG3Jc?W8c@$CgOFPmg{fzD}i;ea}i5*ui`J)XReqg(nb31wD z)AEwk90Yw^PS<;iFGPBg@!iiprqfg!3j-*IyZ5}gfOr4+JI%O+)E`xbC^Ltn@)4gS z>IAhvTVAjXFs$WiAm#3g&jE6T@QluQPn)J}JV_bFX_b7sbvy(yj?kg+?uxS}ZWd*M81M6Fam`m}Sf%IUAW>2_AS zTR8<)=7FiIG}^MNez(iU1}Q5>98gY5&8?+xpK4{FdcdrFx1M!L>O{BD_gx<4l%3Y2 zOZW?JZT0fsXsh=Ps!_x&62r0=Qyx?CCe7{a=Iqybzf{Q5|BJ&3h=4T{5tISU@&9s&Ne272F2~d$}nK@*x?WPwmrP)RN40McI_hCoy#P#a%G>Bd7rm+ z5O7|!?g9{i3;Q#9OMS?c2aECic1s5t+XL#~Vof+ko#fsOB2uW1%rcR4#!s!WSxg?7Vbc_rH|5y zB0MdfTp!MjBavDwi?vkOaG+Ld)LbKAfK9j2hTP<_b=R+`1--!b#-r1CWJ1v3UizVQve4-)!5#Hp**vo@Yd4b&BiG}B(8*H)#Kdg z!jc!kH%Gt3U9;<}hghMBy2TU^g@=nwcGFeLSj9XNDr*Am1rOf3ZCghI9YCcvojcc) zI%>=OoF7RKgXTe-zX_ZM5AbPU6MY&XNeB{EH@_CP@eN8Wt;;B$hJl#KhrR3g)kTlg z)AHb0k_I<%|4F{YvJ5VWpF9PUjd+b8rYkaYI&kwv1G0(W0(`1U7~GYwin$LK{m$=guSEn*tKG!7)Sx<}LEpJnC-) zKGKFq6Wa=^m-?JFhyX|LT0_R4sF@^;p^{+&LMM_@k+k^Q($}V_qa>4htLgE_R~vm> zNwyB^i&rdoaeZL4vV0-J;vaou6^2qcS*{F{5bijal$I1rk@_QaFHppZ~ENbj+CIoBF=D&bZ*u+NN&!rcx|w3`SPZ3BA7xV7H=v_Nl7QWFy^B98=9Bd z*p=8y(gGR05T;T>Z4e{ZPzZcY8pC!F>aNbE=pP&q>7lmVLeku(F$|^r^7)hXg1^)J z-?EL->hx-_;ZkGGYACKFOX-#lQT>+^rCy%IrveYNlw@++wrveB57e~&{an)sfVV)1 z3v4^FJ)3dQw%309?%j^~bwIg9IZGr;?0n!GtXvbiWw6ck`#-og?3)&V*bDx~(HhPE zdcZa(3Ky7DdS54k`RSEq9mP(-*8)mBJ8rMWUZH>> zbkyzM=98}HkY~j24G7SCW;)^157E*-&f$v7&Gl2pR96{TFH`p5N-R 
zl=Fne;sX1L#N^n;NYF21#VdxEZKjGtqj!;vEIF8v8Cj2{#kz>GOEe0glw#7YZq$#^ zO3Jh!05Tbr*iKcpO|2iLoXDV)lYfA6{}q^J)oCw|TQw1ofn7)^!b&?qE4#1Mj#zJE zxYQ8Fl4pwq$^lqe>#_`tqgnSmxM|6)y*Kd~XQ z<@P1oGdQOxh&NLnWKeZTyeCyw<3qLoQr>GF8rDSv8{$SJY3v0NVZhIQ&jjx|zjgZd z%k)!zYh-jt4N(!$cFR5%-yW!!q5jl%jqm>T$4U(TL%7b*{7qK^lm?Ho=oA0+(nJ2| z+Ubiau7s*whSF>V88!Jcb4usW$Z$_FSgp*x=E~n0_I}uj%0B-?TR!DTsR;=IJgCRO z)B}u+?BUh7-v7C6cIw!hAg+Wi5=TMv$5qzXJdJbtAQQBHK5}F>QZ-CcAJQTkDta@Q z_qAzub_{Dpvy-bXtFbmv>_LD3*!%a#tet9(t7=a>HU}m$86{E2aswA# z(%nV*`8^rcKPB{&E*rFoD|qYdU$^ScSY(nL9??WWux)+k<>XhKoq0nG;Aa#uVZ#zs zgo#|7|ER54$DpZ(QkrtTH*VTL(e8JBEiJ8Xtph-h96u~-Io4tK?&{_GK^OWlPzx|E z5PVU5HE3H&`#eEQ(_!#{0SD0^>s+@oH*b&S=e4UK=PcVKlCf!cn!!Y(b1^N$$5MY| zJ}U1~5#Zqo^f=}ygm@GqkmpTEqrCq&iM8D`3xeBXS!(5$ty}j^WDA~{d1L+P7cU-% z2TM|3&7m@^`1hX&`gkfY7_V5qylC7tuwl`=64()#j*99DZor~(HVlyaK}J2w*zmM` z6iPLR0^`Qc=UI0l_h@sbWHuTVcNv}#?CR->Vl$L2ZNR0mIY@*e0AAs%KTdsp3GU{X zix-b^wYBQhSQiaMHbTE3kV;gkm#v@D)@30G(Ba;vA;x2_ z=T?BEY_v+TCGcvUxnzu%z3~Vtf>FO+sJGfpdxUG`?xzL8&Mu?JjXMdeEJG3`bq}pN zNrGF5*YhZtw5U?;OaOyCBvWoxcET3QHwnTh=sfFvPAKYI#g)F1+E*`T7OAvGewSBY zjfm45=~VtziavhvaH_(u)#9ATYZqosGm(isY#4o~q^@@zzyDinUVJxdSk^SR8R_#m z*ck)WlIaJh=06O$vihSIX#QhTw_o=qbHsQ5{x_`Kz>3^yN-ZFGiR%*oB`m1!7O=Sg z7S)RZ!lE^O);8wlpv_uJHYFHc4Fx(|^&fuN!ai%w=r}>NNxKmOZd8M?f^!|fS?!@- z2*LUZc#yWTn@9FEltAZ$){L7ve^j-Ge4;T^)&ACa zKW8SZ;xX$WJpRIV+e-V}2^T^y2|-MPS?5ljG%HP(VBydKBakjX>-n0O!K}m6&(HUY z8TSN%9fg6bU zgj6_ZGc$R#l~tGOENK`18pE1HiBeL3{!%Tog~NX_g1(VNoA^kX56$EfL1Y8~Is zGG^?aKG7|AMB3>vsn2q4jM%-I;*dYn;NPHuERhL$yjIjaBx~V?z zFRYq!pe(p3hI||-z$rL1yiu?shiT2MV63RoP6d;xs-U z7+4c)esz<<&3!sywf)NgN&FHVNpVwRzfPzb26IQt-+cDnACn3^50dm^M z+NAtwfPq=Xf$I}7?>rJ;yqLLC1PZ;5wsohkN?P2X zd~UK0za)+!&j@XkQ8d9Whe=GNusAn;LaHa7r@zb04CSG& zszQ{<(3;lpRe>#+tym#;H;Lt-9gj$dBz0g(wb%C6U;w6$;;74n6k~l&xgsu+Y z;QumEPE(RBVQofysxSzXhaW97Sy6n6lkPR_Fvrp33hgwEWgwQ)NrC(q?HKd2oTTu+ zty{O=!-h86K3*tKTHV+!OB)YQ-rcLHXN-~6Wh=;IB^b6nB<9rgGrcn1d1}(ibAPPN zd(lRbkEHomUOoJz#`75Cdv=HaBBJVZZ7GKGcqxPply|O1Qs&c9$h=D#@pY^)NG&vD 
zH)Q?2j@rRqOANJ6xYta$S64x4r#O0c_xKj8bOSov8MUg>6O)K!J5*Y;7cKH4%5uP< zLA5sCbH7oeAkWHjo%&9^G(LCdXnryOH>UOw^MB?WTN}A`taC@De;(b~vg#5ykA`l> z&VJT+nuL|#eehsnx1Go!i0~?It6JIeCH=*a`A!!O<>OC(Oobxx!lEQ%cZNeRcBlm9 zI0>ieM{hK=|KexI_%mnD9+HMb2nntTZ2y4>qE9`Wc6{;gWbHB}d@m`>%%40=Sc54N z;z?1ZrF5@Spi(4;5RsgSBygPQ6_$xEnJ!W&8GggDM2k;^$w`njO@=TDz(zrY_us9^ z9W$*wBCo!VHsceTa=A?ZI0QM_Eh^EV73JWV$LJ$>F%mM1Jtzt~gc3XNt@bW3s2kX& z^w>QtQfOqdSTIE(JnD*ILk$2?GR22CBUl^Bx5iteAl`p$d+n()ZbO7i4+BME8gm&$Ul)_~rm*uE#+$VEN(-=ego zUH6e$?a=OB;V?3v%uDMvk30Rh#6%x@9{m8xZc?DheyN7-9(Ml_y|E+h~+viaG6gT@~0)oxS4%Oy-6Ei@J zyL*&G`Tg^4FM<cQFfFs;0eOPnM2DU|G(JGolQJQK`FN;&-f#C^tifo#^;M|4m*gBREfMjyATcG{j*stp0Do4pK1pHdiP`Gup3yPA#V-x2 zd?M`9lQ}oCiV=9ngl>3|{(hYvs1Wg%&cC_9mnEjH;`N3xRBQ>hrEXw{zjjwHo4cFZFp9@ zTZ6HFDYi>P?`e-&)v4J`r*nOoC^R$oaZ1{bU(6$t#zmaSIukK#Mq2k5%hu26c50&A zh?9%QMkFoU)cw@4i4)A4h4%hw$FV*6mCB_94jg>@vqn?T`=wRuGv_Yf|NgV_{dJ~o zAHB)VZK!yrx;e_s%yViJ1^%Ab`xg%Kwxj5Q%a;L^AiJPUv6)R2@O<&SH$He{j&sh@ zj_unIT{%|>EX0HYZha}a@@b?(oi9--c`yf+9N(vxmjq;m4+6_GB@YMwiv1ghR4_@W zGa@-PB0pIbx6q+LQsS&q+_aMxdlSWnR;yN5=Ym3PK~M`uT0=ps5%n!*2mx*Zrk*4v z)gC&%B0nO=80YLJ(qS5l1yM$@b*th9#u8q^L7Evkxtm{BVn`Egk7w32@=!Ca*Ma{N zwd?R$=zv>L7+Z<8$$d6}Y%*Gur#CkA=hwX^s||pJ2*v{~G##EbnH;aF=^!k~sbU2r zNBwwB(JlAgO1%JA)OOXXRTsm<&6q)Oxfd!nFnO6oB8U8qUQcFuz2**=Np^Xw`3-d} zbym(JYJr<(uuIu^=XI1;6u^k(WE8S=GnK`CF`u#^PsK6FavPqtW z*Xyu#@9OKQZR;rD-xN1bv|H`2{1i}Qa8%@ToVzG)F*_BELON4R0Z5~ux_`z*92CfT zdpCwy9F=m<)E$ItFC*ZUjD zSxytdRG>SSqItj02$%H-PK1Wmm)}o9@cCsWW>5uT%QGz$Lun`rVjJptT6{vw*inqD zAVi9xGC}z5Z(PyIk-K;9%qUb3v<~%3P*Q-MI(IH^R6Ez8HRWFe?j%(3)svtxXw!Je zW|@}NZ@*rL+XD<1-+Wr|>j!BL;Q@(lys0@HL!!UGzgp9#fvYuc?OHV?4)M%}CG+QR z+q$)mVz_hV|X;87XClrMr5pqn@k@5H? 
zM`jQ38EPkT%rJMA8))$>Sm!ddn#npEf(N5CryL%xy|+E%B(6Oe<{6gulGi}SgBhkI zkh9>)-$pUTs;*@{N97=2O3oNy3t6S86C^v9DS0I-fS~{kTne zYqM9_)f`SF7OJ1xV)sD^aTQ-ag&uXl&k3ISP7~Cv|=jL zmh$rTOm3a#(2A4#M#6_z1dqW*OIO9QEJ=qoYt_11Y;$t#CN>J#=q=4(fe)X&cyWGm z+pUo*J7r-D7Yr1VjloE}W0T8KmWeMKv*7n_2iL;_alJg}%=$R1Z?9$PGrdnRl*g!R zSWd}N#|dX5G~<;u*pjlwkLVEo-(?7x7R#Ukvpl`M=0dJehR@6`KY3E8 zwahtWW*Jf|u?TM{T`WN|EX2T1@SCLDG(n$epfj>j=e_2=so=!Kh+eJi*LKRAnG>c> z+X?Z>2D=HnqX;(+9X`3j)7f1=$XZw8LxuVpG-%p|A||B^6^6qAQn=-f@9Tek9rZHY zwxfEJ6dw`eUIfAs!5JN1&HSuUr@{PCA4ys?ZP4^+mzG$LJ-xgpj(JQvhIME&PPzq) z7mu7a?eNUpYjoKSo;-OX>EftQFLJaV(dZZ@q^uk<)S}w}K;J+~)m@{Kz>HY(v*mqE z?_$}-yn!cA)vD95!Y5L zL2?5de2QyfcZ~6f`NhR?0H?Tg&!qnTSsY>_Sq6BMQ33k2A%D8(l;YV%c>qPXA{|B* zLCEb_Ax55^Pr3n2&mw$0GO{1xXuGEM^0Nq|J;HBSlg4H(M^_kRyZ7L-v{7R#a>c#Q z7>YM<)O?~*AJ8ZuPHIACchvoL8efmkY@^Xoa65PJ?!T(_b9>Z*SC(beDZO2{a1rcu zy+)006Gl5PXZ+Vy{q}07A6~r8mUL_b1A|$}p0ALiLovA(d+;Hn9`>DxXk=PoJ>oo0 z7dLJE?Tj9hamat{6;c(z*(NX}4e4hOFk)VKzg4!k&B@e}>~9z!6sNx>z5aT&{k9^P z9}}Y@FT^RQw|Rf!FZ!&i_wDM-f7)2{+e%e4^>{p|=C@PCw5V{RrH+6c5IyS87Grkh z>kLtQk~l)gXW4F_9(sEHR}Fan&e2&#{!zyP1D+<$J~$l_AoIB!qwyeh=%7*S!X@^1 z>=K4}EQ-<%_Ex&hNVg5KB=v)H(E{E9bPgO$LNXWz8LU(IJL&M|2!Es5mR}ONB zFwu`DE7ZJG(-+QQC>;*mpI~FNnRBpc{G3><&{x_m#2HoZ`|snCCma3!IAh;@ z)23@PTPA>)t^-mdlIYocPImH{T5w8oKnt9=Y+2aUDDE;caDD}fctz37UCfkl&wK3@ zIk~xv9jOPUgn)tj*=O2i_`5&dYegK155;VVEm-ux^2F-bpO9|XRrT)OJiz`~bh!LO z)II(Fd>QlTToaxr$Foe<6^SnYId$etM#`Cq-FnQ3YR09%ZS&?@?1R($W5&Cf7I#0X zY#f*BX8M_{;8#REFtYTmgc^>pvGI9g-*;X`?s6Id$svRtld-&9`&U=R_p*cNCs}J3 zd`7KTc_uCNW_nNwFg^6%#|sm?J;4IYIehuP{z{!B)#vzan2lKoe~t~x+3rtP5gAzzpqh!UBT!JpqslUYJjM&XLQt6@FKF|so%^+G z?wa?K3RbPDYN)Q&u7L`+J23?Z0-G5cI zPYUWMUwB&H-}iip#=V&6=(hBrGJzOOn1F(dEuxzlT!OWcnB!!XA2G zkgKJ+-czDc5)iuD0>0uW3UP%}`H7(H*)m7)!GpI6aF*Mcq>LW(TC-m>ZWp=FAXS3O zO2R@m>eq?a#E0?7c?CpK?|7<--8j+ec{MnxvQ_9}uzjQf1{j z)N6rbif8{_GZz^{bV{WzF`={#y)qZpi!K=5D7xVCwTf-Wk9w3vT~cnT=e^rBmVrh_%LAAmY74bc`kANmfWcG|;o-f5t;vXN*}ZGmS1B)VY&bBw zQ_~-9Y&v!`K3@KVjS7QMBLzCJnt7|E@*@HcRt`!>GvCT+<$?omUO6nN$o&QFT0f#| 
zo}}kai{|_b*^Qmh;m6Q-^7l{U^AKF;RItH75d>Xd7|IzOeCoBT(0t%42 z9?9BGu_B7&)3cXPrQVh? z-OJEQkgwMO7ukUIMW<3edMtMO?SM=rd2j<`W!JRy^ktB5PxPa%U0ad&@xzD5j~}=1 zrUHOE&YI`)$=Lt7tn64=*o29oJ z!1$^i=iG;9JDD7>6eG|-3-*zD_Z46vbwVdL2%WeCXmxnr)41@MUT71k1d^`4(99f! zL6^p~5eFu@8v)aLom4&WH*CnCkXhn%6@W`8mc84lHlw(Eq-sqI*TU3f69PYKC=jzb zm)aaP}2uw=>m7p)FIE;zo>rRkW|vV}Ry zC$C3;jA;{fM(I6lfs4kk|9GZTw0!E*8^_5^VCrY5MU`bU&JI#`{Zsr7B%~7gdn_`t zIcxhV1IR}jx%8iuabHzH4?H#FP2#eu!VE{x-|f2U1Y3s|M6ZysS?|Z~{kWEFcF!GF zr`+O5ds-P0FkboJje%kLo;?^OkE7M2;6z$?;zd})h?ovu6N~{Ag`ua-ooY4eU9QRh zZWJYRT^Nm0aYMd0RMyuS7-F9KU2t7;1IJ z)#>E@y~tP-TUt^Q&)E{LTz1Clvy+q4cpIA&tBz3R!<=&7&wX#F*) z*FAUaNH{oHJ@AZMT#>te#%7=uMRB_}_MStOQHSwBK!Cn_{rdQt=c|S10+O2l_P4g` zM#d|>tu|gMNi9* z>(d8$;EBo_+~}@5bi443a>GfF^Z#-8=HXPX|NrP}qCu&s3`uH7qp3k5ijqo^6tT5J z5s})FX|a^Kh_*}>vPB7Dmmy?{qEbmA8ACEs-I4r?(LW(lRJEf= zBaX@$@!f-;#~LX+I8BUUS-l$rKP1yp|eDfE~^s9 z@z>%k5)!54{jQ)B0iY)}Bu6f#eScjH7UQq zE{B=8K8G2vz~GP;qUZC08ycjr@#GP_2mxWDFd|D>WEvY*l(+I|JumKoXQdd_M zN8$wDoGk*Z`K}oOvF& z>-UI=H)ymlgpZ7hCek=$yKPIU*%WwvY|G6>3q4^8x%xJHWZ4FIw!xb{Dt@?JZOwPhc#M%-pN~y(`pQbXxXF zO^pcBZG2u#cBt)@c&dd^dVt1cvJz2hww?L@Pm85HWw|H1Lif2cnFE&VfBaQ3{iN)L z@4er2F~`>67w+iJ;I$34X{0|uKz+&(qOhqxJKi;o zW$gu0H&|^20Uv?FA?iVlQFB^cd^`zSCEiUaHN;S3ld;zN@)$@%)*sMp%&l~K_ybo} z*9@3x{Rab&(Fkmicvan8POma&3OE+1&u7e9xYiZbA>?8d3Yr?A97LoGBO?A}^Y`K@{kQ6I z769>925d?hpH>G22pWy2SyX_u2gLax?exUfO<)*we-u~PD1j{*!!ZXVg*zHXOc0nv zAw!-BqXM8@(hsS8nC^mr;gq7YKNhTiqvSc*ubW+3NKP`=l|9(0)cxW_RD{NVDy6Tp zMp!DlTg88xBJe8+DyGKsxD;qHU$AZf7>AlAd{VUI$!|57~>u%y?yTtU88IiIN~J zWaDPo;-iS80!Ri*j!~M(oeN{t&ry_L^Q5j$6sbiQAy_BtR!N~A#WK;)qkt&^!+F}_ z2gIFcIMWy-r3wUq#8W8$K$p*$N&dX_ynW(G6Gsq;5F)<=_QZe~+le294R(V#FK8ks z#kX@zP1|^H9rJCi4z>=L#Pga=K)M8s7X|Fu&>8lYAPXF(DCYtL*%icD4lVB6>S~@( zC1dmdi9rK^%R@HU0V*}A|2Ka)T+W4f+hogk&Z=)~y95CR*!Kp&FodClyXBsp$d8Mq zWl+CBgHF<|N$WGp9%K~aB?rSg2VMY383os=LjFyW9x>tDs;G!r@uZMATLfVJZBySb z^Jm};z}S#K&IzMb!_D0wCr9WBi2I)5WFcZPoZeq+9}7d^lHoLXzlGNH8v($GmWCjT zz$BY`pizXW{!@)IhLyq~Nd!tdI@bz5&my}Rd8dt6rc$qyh~IW9hkPFJWerL6vG{@& 
zb3A*xyIplw?*vYS5~`%U-1M3a3?*d18Y)B7mUGbMtKucxZmza-XFX8SU*!R6kVq(? zP`;4MHn{OB<*rG}o(!EG!RnE=L|jq`#=sY1xF5|#u58Sl!F(tWSb%4NAp-IN zRjnG*uArnigF%Eu-YF}qZN>N-{u{i#Nn&D0epnKU_+HKSAr4J5SjD+_XL=gBJAx9d`Xx-8;yxn5RDOK0nv>`AwtiG(y zB|l@tF;#TU^51@Y7@K&EC+TaQR48MvOe^nL5~rc|l*z|CyxE*)dbfquIv0A^5}M{s z*LG3P%IcNN1{WPv^2*)jyJ}^w`xnz}$M%KU;N>Vg%9N=i#%*4TEVkXK{Y_faf`eY$ zzulYlNqafA&6{2?mMVDkBrk1IWsud$`wYK!PS+d@4pm|8Vz#wH`5fs5w=5Qhowi)- z>C!uUwYK&d1!d90r%$(xRi3h_s;+#%4YBsUb*HRPe{o5!M9S@#Mz`2^+KCP=)pfN? zC|a9f%z9bBV8s_e$`OV4LVUo zp}6VZcMpqMA-O_g3gwzz*UT?cJ1j?N*^q`mN?iBmb?BrM2l*C{jvgE9@>yuoQLL*3 zr=PUEvZx#w_n4iRkDWH-PFEh=ZDkNO@*yZkrp@d7V({MVxKF&oaq?JgvU{_QT&Tg3 zDHOe|nF}AUW#(x}jGoS%c~Wa>j{*9b4xvtJhAV54(G(gh%q$%R0!OZf z$vZDx$T-*Y3*}R7>wC4j$RwH+ZDdvFO?i)5;cqrQavn;q+@#WsZNov@oR*IB+9lw+rjX3P>c&?8dUmy@;Qw!C`1LVA5jZuMg6kV{!I(KXMnCV#D3 zlzzUebI+E|AJ)shl@jN|wd4u+CJRYZB8s zn=uf)aPt^@Q2e9l{27jG_%njS*IAr1?EgV;_U+IVSO3NJ=|#>CuJSi;YiqBRm!IdC zvd)j&*|E~sde`eE^?ivoHIpd3r>iNH?10}dYQ@Zt+Oy)Tn(wDHVQIVjmHpiqTw~Gt zP)Fxh>np)NKi|^p{%_gQJ-zep=`8y^Vq3vtMfXV2>&rimcG@L7x31W*kpC95LPpQY zkt{clS$B4^Ydqp~Rz}M4y0J&irXD(Md{T|;ri6y{2s`toS$)g&a|#NIqjs#eJY5d0 z^#|j%rQlJ`qtA7m(6A@3su#cB*pYE?a`L9Jz_y{t7U3Im(e=%dGO@Qlm1q<&wz|B| zPUAeviMTpxaImhh=8t85wkj;DJKBRU9L8*`w>Zqd#~PeO zvD<&swdnJUjNk~#Oqy{+m!w>}m8#)s$zR3yyPrF`QmwPf>lDBv;}`RzJFHI*54F_^ z{J1=sn=LPvM#T?mNH08LvfP5fnv+`R?3HP4D!CQ?b1i8?r^1$aFm6`#WL4P z*{!`~r*%G?jRzrN0CC7MFy7<`qYQ|kme(MU~;#Qt1pOMYZtYsxQU5C&9RzZ2i2$F=D5s%`aU-1 z-pu*u&V_!gJkMeK)}MFD8QOI~=`g3@c+(fIHF9$L9N+uj4{jS}X2_O@Q_@{d@2F#b zNlxyFIp4aSJ4oB3GV;dRW?%M&WMHAgeFw9zMO+#1;w`c7TevHvB9xO-e*Ma(Xl0#~I6Bg^w-1k1eeK#@eS-so`Mk$gFQD8Sez)ST zzW!SU#l;Mt?6bG;k5+M<)7+wNpFc0us(kmpS`~vmtU!DJ)Vzqjk<35FjdWRq^$xT@9jLfPTgKr|5($4z%y0@Fi=_<-t3h0Zn zwpY`b%Y*ndD!5vlJ%1NoDcM<`j!LsfSW8`1{GG`xc(M*9ozlo z{RXetw5BZGUwz*j8QGuhx5yCLGP&uo)7`psO*5D9c7};;R95>q7>7^mE&ZLEF09{g zCgi?4XeH0|w~kX(Q^VI$&c*NQsyeidHRVi*BL3y73ctlOI1R?14fI@B1yLv-!K|e3 zr$Xlb_N{!O+CDOj(TfL$hqDzPY})p*ucfSZ@^$s4?xCf3-MH(W8lFb4_dYrKd#!Og 
zx?k3du4gc4vl#Vl^82L6rfqY-RVWUU-Y(atOw?K`$o+pcH8VDJqMQ;lej7Gp;vJb@ zQgm=*m^3^eFyE`4$@+$62yFdMIcF5HF8eXEfT4mL#1|A&H`dPSIo{UFPBF^onmTpr znaC{Gko`LrBdYOo94I@bj{e?kr+5C!f84)QXofmQQ=;|-Va?If4Ct&?E23lQ&vag1 zxg+dY_-?1}zEem$UtU`3jAtogG;?UU(wMX`k zELhEX|K=^T@kp1bN`G&{ATNH&4yt}6au`+z%_R1G^lF*~Cf7zK_FZ=eHWId${;d+5 zF829IZ0{>~SO`cT7pW9(5DVfKAyS>aP^N?bepCz|AXLFl5&~MWz!zkQU_*m(stvR! z|B%nk4N|6nN?VTEtH3Wn-Df!Til?6jVNq>G^!Id%Ulo|uw1M!n0K%DrrbKVw1* zz}-mND4;r^oq(rK2V&xLNmFA?aAXi=wqoL6N#8eti#C9i|G#H_-EK&6NT3uXmN4fL zdm;%jIc*4int+L3szuNn0ese<0F8EY7`I9j!DTU)?QF|iG3dnFUv-_OG4I62l3W1f z;UgkV3X%_`=(Rb z;jI>UUS(J_0Q>+dsR0az;wS*4PBLjKt+VE}F+3moNA-hjgs;PUYmV)1sGW&z5P%(Y zpd{l#sH$5;losLMBN&**^hly-LtjpwC4i47T^pM+mos=7wVjDCGfYY; zr^}9lWVix|pU9vJgkO<Hsjr-`qTDTL$NHt1?L5T)aRBbalDIG(4d?Dq#w?g9Q#S z+YfTmsTw>=sD`)+x;Kj=AVnYV9g5E=1_x{c{dv8T#EKOup!odyvuSrHfqzESEi z)XdqfCzp%U4LjDv()_>kz;`QU4V&8cjQBUUGYTw3Roi#nb@mW=r~Gtr9j#SgPcIn{ z`dkqZqfi)i2akMZzAn^hTF;fk+^NAC!Z?o#SfDfdAyqa4V7&=?aA*w22m{ln6k|9xBF%f_Yn8eqd~+s?R0Wd0DvqmuLf43 z^Kp2%2z=^&FFAh)y-J49zgH>%jt2w^?fHtwZ^r>)rIEFk}<9mQn zb4ay9n*+mN?Q?6)2epP-_LmV6E0_D-}Da4wb!fdqLa@yKjm-Y4E8DGeQ z8mvf=3$%mVNL8TUs{&e6u-W=4(7;H0JTzJ@1!M>_70$ShQ?qI!oi zFIdw`nvS%J8r#IUI9}4?%pGAp)AGDhOk>*ivmT!lqK#2CI0se-33VyFlk2Y&1}_ev!FUHibx3I-Q|{ zgOR4bwRM%evubZhQIo({#|^-Kx)G=P$xp5}7aH(M-tnS_%&N*t74g8RTek@4`tCY- zP`N)JJa{7Te#-@8bMqOWN*2YWv9?0;gHQLzUjLT)fiOu%L}WL`Cngfq6^0s;1G(+- zefdJ(lfjxRol|W3sWPp0W@9#UEvRCi1OE4y)B#k6I8Q!tH z*W=@-dGhgnwYr}krxZXVueIKF*QdBY zmEDcfAAyq=s5VSRYPcFZiO&GDO(75ag`oMk36J*&^5Y?>G@V2E6gYE|)kHx`L|pkD zTFdIl&=J184|KmWCBsCVVV6&+bEH2HVUlA3wjuDYTrt`{kg_42WUq;Ws=xKYpyG-p zqSU7Y{y$Mw3wmj0M#izv&#&rJok#lSiRfyUc7cx}vi|;)V~R6|3V2O?tzA0@oEDCF zi8Urw3`LsK`rp?yKB(xjGlb3xFF$1Sgk*&Pn}kgns3-lO0VZ= zvGIbMAdBb~vMHc0z=R6v!v+RLiK2kpr`%%RyMN!*cShaP11mXKgk5dPkeBv4hO$8p z?a|$ld6#hP{i%#FMN7nxy?Xib60xFLLOp9C1MSD}k0AwrRato*8;U=sxt)6Y2q_Td zU@f?|wa@l}gXtOI>p=Bi^sRMX>{?0B8CbBojy=Zu7$Bxvk$YjvM`#0&z=9zxea!Vo zwJFeHqCPe~h?;PJV7%Of|0D&qTsc%4Uz(c>Yv~+kKVceEL{!spm}kX?9PbWM5R;bJ 
zM{5?E*?s6D2jq@yW8SOiVJRhrYLr9gP|Cm7-NP`*G{kd%S}fctc4 z#%;xK8OrDj%vT^tzzp&{t<^yS2-HP_ydONkOCn3{K^42tH9Ic)D&C7qh_uhqvd zV#12j@MpfjX+7D^0Iobq9|4ald{WXU-!47q)%!GRWW1G=xgWtWl))dP;Q)oeSaZWY zXc(v%UwAoAbty`%-_J%~-ubNq6vJPa>=Xad@$HZ*?mwP~G8K8txz5NkR7C9md#hon z;ePaWb_PntmkO&Uu)ODi^>HxmAkt#TvU#ai;K&iOlC@=-Esgn2LL9TJvDJL!A@BIb z;TPU{e^4%LY4DzyJZyoG+n@X)$i~H7sywcO*eXn=nuUQ^XeWn;hohihHuy*Tvzp8o z4gQ3eRT{(ph({hCY#59V8;Jgsb{>8m;)0k_&`5T%+Y6f}V|UV@6&xNu8^U9df9Hyz z*(KGmo`Hc0G?UPQUn+2c=m}5K9kF)EDODS`DUnmMf_&Py^B(=vf$)*xVOYeUU|22; zE>Nb;H*|^Wd-mgF&V7IIh6Y5CB-m=s;?1tG1|i>PdyMwkc3AVSVsaHSXrhyo=i)f? zzH;SD`G>MH%;M%D3T))1fa$?vpW>(S;Lksa`3^f3=ICsb44D<%g<)U?0^{Z@Um59w zWLQ@cr|kv7^6RgQZ@=R>jhs+XnGAD^2i&k^L=<-KcNNz7xunU2YLtu)B~x8=cD4ye zqI#FQt;0kk;77ZVZDt7NQx#L>kW> z%eI4vwhMEci)E5F@ivSv6SydLq9OnTrv+seyrjr_+GpiO=+!&PcG-Q-w8+PgAHT8n zjHUOt9U8?XFuja3QRV&NQcfr)-s;s2@1E)_<97lsepLu|kgu8osZYsSAF)`rUsQa* zGR7Og3>pPvtgp;hO#cO)@7T1iG)JXle1E|Me%iMo;tQFQh=5~0(xs8;4x}zoT*sY0 zf@q0s#J|8Pg3!LR%6a`_;?5L^L8elm97t?O^4DcDcMVI{qNt{(1`_29(3G7HOA{Uq z$LJ0xOnaO|BrJ}OzZLxu&s~8Fv`r)u(l*A?~_fv5Xe)@b|9Sv>Y9wM*S z-|rc5H~w9^_GP~UK#*O~hCug31P2g~FPD=k&(!K1Ot$?t|8pUozs@# zb3noLH)6LSG3#9O98zQ;iP5hQ^}NivFB-5(YzNE%Ne7tocNLOo@^R9V>=Yz&L52i# znZ!LF73L&Y(5+;~6@+)UV9r4=TP>e6vM5@i9pdBvu=Xr{dX0)g@h^{|RGcJ|5uY8_ z8+hQ!sQbYFekrG8h=6@_xEr_ayx&paDsfrIWs*x?8o2cK2|&wI0GuCLg*W&rW?TC zfla-nmx@2&bk53VR19TZhKqH_vI}@qG1&A)!%}xmI}wI|RkG0(s5UZ+W0jPg_WY-< zX0{5mwM}$G&eb;&9~y7qAZ^{99Y{K6ywn|WdcMJ&WTSj2_fG#gx-SkBiRb(5XwQTd z4sOG~Gdl{N>E<*)>D6*E5n=4ef>sAcQ-~HnA?rJK&Mr4CTIR?uO4aN zibkG(9j`FmPgK)sm>lA;IVioxxk9Yo$o1@aVek7p2W5>kRzhHt=JZy1lf{f3>Co&f zm6sPn6)R)AQh&y?U1M(K)c^eA*8dTl^M2)iw?qG*!8v_Pa{l3+jt}QrtD_XvxJzTK zJBZpcwBR6v_V=nlpY!2H*ATxOpe}X|A8cMICjg>&5??tUYoqMsoB?w<1ffL3%Vz}n z8j>bSrV+&zx-%o#7|48eI5Oc@?-bVLhXe`7HAK z^-GYS5g`sne?szyv27dDbNkC-XGV&5`@-Byo7M;aAeo|ZH2_skMu7`N+&U>`>d zH4AXL3an&m!;{#J2fejuw6QD+B8c^sLH1`OK!iyqJSh5vs#+}~A$lR>IfpQ3?g}6+ z5_me)_g?eS7?fZs$XJIO^B6?w#2!c@^5EhYSfM1-s34ldegwj!lY5{s>CK0aS&tq? 
zVf!V5D1<4LzU%57kULJBr{K~ZR4nk+g=4W?$HfG&Aae$U`9z-lY;^1)CLvfNrx^Bi z60xA6!7l=%gW$;6ZoIEsf->9o4rJgMmH|?UZm~2NHfWMgdNvYcX$qi4&U4O9EN{1v7P3OIi6^!9UXpM zmx8DMQkgBnk!g52@f{yg$wb?VP+1un8OaQIeyW?H1d@UFA4n(+Sg)$85YAnJc{LSK z(h8dRBB>?~cRu=AVQ44&NoXf?-(7lnSy>7okHjQ2s6Lmnva0F=m@%rlg}f$nS!40D zsKb6|Y3;()<^jGoj&Z@RC`Kf1*MGW+rWX_n(8mNDS%DBV_wN2b2&jhsZvm=(=DEi2 z77@(kBf(5m5z-b7Occ8H>ObE&AFyO+UJ-R}=xRfi-^v00cJkDzgKHRR!mnodNr7;1 z(^>7Iv-+8^b#|04&9l#1X87_EDbKWlM|NFH$VLin^Jy|hi9|>!k<1DT)P&kjb;GgV z1>Q+(+(*o?lIj~9qx)A3_$;caVSD;YZQXg-`5BD^%>z!z@wY^j%2~8t>C!OE{2!pI zD0R$sa1fPsfLTbv@jNh;Wa99#`aBOIVP%&^EdvARQ;Q)tMjprxv`B!r5QTCHi^BXi zG60j-FU=Vh^}iq!yHQdJs>z+5>Z+D(Y?7NXsQ}l%aew-Pd!jN-)Jx$C(^4DDhBF85 zOro-Sf~;R&TZM#L6_f@@qj+@gPPzPNz#BfTjDnMq_sjiLw$=_l`z&QM>g^MmG+L9` zdtxtOcA<8UE5(0z-5hlgj;7CQvR0>w^jV2UG1Je9%Bayi-V0^x{0|06?~&t6wlV2;f8h#mhFBwYZ`y=ShUXSjkR{St=qbQF^d&T_`Prk8Dosu zwifF@iGY$SHO)lQ%f_^`=g${l*Yklz@e}mAJBc%65);892(E0dMe-{d2@j0ZvB^lC zjns5#7!hH`MKYH`BmCC>wABCp5T^8k0M*jcy0v~2>e9su=Y0oxrHPcYjc%iuQ#+29 zYVnN&b^nA)zu|PMx}1%NSuM*pxkL?=J%c|394{H{l1>N~$L6HVDQ%JdV*Tx%opW#( zkWKSj(ZiZs`%cmXAv*u2pM>RNrMyzgm;vZBV`LG4bZ{K(Of09P8>%M;H9^ao1ypXs z#v59kn;TCpL{evJ4(2MZTWxKUO2?807u>f8fYcRHV_P#k)jGYxbV<)84C6xbTFZ;l zaZ|joz5!|Nd{(GS-zwt*5st&AxP9V?yx;~yv|UBzV!7WgKOqk^PDGyyuNL-=Lk0{; zw@C^zRJvr6QL!>7;e2J(+^Eq>OaHBlkmptH?V3Ap-i5n&SCXvrYEe@avgiJnfP&8h z#OV*Yurw%3YZ^p8hsN78p2(SBKezv-2q zytk*h`14;a^95oA7i1Ve+$O(o$J3?r^h@U+xNBUe{O)E{!o73n4@{dj#7cPf=bMks zU&pHaAI!SaEOTlh0{+3@FMJ@mzG+n*f82@u41 z@GM_y9n1lQWvc2Vp~aiqp(eBzVceUg6@=^~flQR?bd(`|vPF3O)tA zUwBVn5IXX6=(|vn#0~rEbVwZ?sk+`tXoe*&>@@FlQU!y){ z!YA>hs>-0REJU+m6%`|M@J@ju1{*tPXlUqIO{T4y^~QPekvgp1SA`hmEs+<}cs0jj z#zowK!y(bXh_IXG0Gl8tHVv{iA2r6fp1oDNiHD;fJm5odv;*p8Wb>Rz_N!p7+PPM# zmbvvNcHY^jFo{DQ$Ovea?s_lLIY)-Gd$Zz(U%z9nI+4t(d&lI}+eELpt~18nJCvrz z*}Xz_^uvS}z)v}%E|SnxL{%&%CPpUz0_L@&X?Ms~6haOVP_jGi+=s%ic3rWm@nyuW zMA!rsmAnA%o`)ft&$XOj8)PECO4#{6JmUfNq~iv-i$*Hdb|sPRY_r!d!Dl1M#}C+d zDz8Tj^vn>nTqODHwq3ipfgBT*rV%8PTHUk#-!WBf_rMvT4~Ze(YT@(uN0r3fI_^Ze 
zr~m{-aD9J`&LuI0NGIeC$IWO{n1^f+_Tzx79Gh@`s*fdA_$on3kd8 zY!r>DiTa6Z>gvyz?&`CVxidBqI^ny2dFS!TQsMi(NRrCdtu^I8gw{cN=^Qv9%DeBkg*ns5@`?(+^1T5i4D93Mcv^?RmZvKW>-nwRJWC?DPT-^m9L=3xuGipa5nIHCbM zi_vjzW3Up>gC7ng4zh>bMt0AOr$cXn^u#FbxKEv(#B*qK{=EJ=`ms+PYZi!Vvi3F= zzh_U}Gf(BmYxppofl$AINu#h+!~XHM)kb9mJPpz7W6j=z5F89JDZt->9ZW_T39#WQ zWFi_gA~+0rX0BVg(iKydP=*s-K2%QXU%Hx`xxc$N2t2E$uW#|t81*KiVlMp3R<>qn zz%>ovgs^F_-=HzeJ;osYAt)Rop8!Td6jPlA*Y455qa$wPECQLphm(s0;&ix(QM|2E zcePT)K*-C;zdSw)eo2BR&gaX1ekTe}rs~IP$LRscNbZEzL_<6<1sg9)U!ta1Z}w(G zCOs0n>Oy}04Yx)GO%m?iyEJL;Is`3TjMG_NUQ!~K(B9T&41YP0(3$OkCNbSEG*a*upZtR68v1!GtynkV8H2(?E}kKaDylZE600TW>9lPNGhEkjme&s5J8uVp=-i}xlTHo;bY#@2R0 z`9>VAckXawCPrQ+ob_(b$i;G~@5N46QdM;s7}v~pl28mN1z&3o(lo58aU;}6zfA*B z4!VBI#pDbK6d%WhZk;OfhgT z=#3_!u_e?M8A*G8n?1b7nBo9xgRmiFx`U>se*mPi&cwrpql#;I&A*=U2;YU7$5kzQ zJ1!RPAQ{j=_vO9I>QFyI$+Q{fe#*A!lIhk*K5hK3AA-mR{gp(|LnGfQdK|ma{Kt--AGuaeE(9ZrE9K{0Hc!~CWQ84(P1yv@PT!Udf%E%8~)A65H$-vuSXL$K9Vx02WdwE z5He^@W`I=$ivu9HrRm1CYwqxe!ND$wMTzrqJ3RQh!xUmNfR+tUAE5xBa642g7g1?q zzp9yk-QmnIy7ppN6xl{JUQP8>f@#(BZsCaKF(f0l$Rfj)!yVsT8_1FfIk z5Rcv#Xb7@?Nu2=-f??j*m_;x;Lt1JhtW#13Dcz+*D$;iQyUZ zcCku7-^W?$gDNk$&p)Qa#GIpVLhD{!XN>bU1v7bYy+}H0#OG8?m$9XJ3(o0(kS5W* zD22%cnVt9M%^b9`vgcDI+Z-r}M&mUZ=|O6AKo(Ys$M$9@TU!gdjmY&5ym z+ok19*U!6INA>mb_Rh7YvkVYxCk%xHc^4717I3kd=aclS2rCAB1BRMt>kzEYg1=3Jl8I7GeMLm`7uQTR#v2^r_0z>cy_4bJM z24CL2U#PW1R$G)P>^iE(7MxNf{|G!O6w^punq^)d8u_e&kLAMcig+v17r~|d1mQGH zgz-cJ5JnA6S-O?&P~fjO7Sr&coC$a{dOyl{Co&%NV4O}ubhN6qGw|c8)KekJG=vj< z>FSbNYj*s2GKML*jgIWq&^Qhq=O0y}tCd%;zLcn!qN0aXelv)W06;rARso>BymQws z;m^W=L0x)p)J-pnbQxKN5@CtmFdnEexIi;#{$73f5K@!v@O~*hWdO<5@ny0>d!FHx z_2|P&lSav`udk0pSu`wum|gRJX)PO1Re%!MYG`PPC?LS1CzT4iuUX%tJG2en{$2^S zC$2xlxbT_tOA7N;B0J@w^X(DcODh`Pnuit^=|*WKLo{V%2pkHC24Ap)zqL%-y~vH; zucZ_l8v`|!jNsM17?1-7FAc2+>Ncnv6JlcvS}<+RF2m&E3Dz7_77w>^FL&4I(*7}v zdmOd=q2%N24wKeV`A~a0UFHOGtJY!`m|hOTY!GRN!@p>C?}JCoevXTjq@*d1JfsEH zVs`fclK$2g2we8@F@l`4NrZv`>sDVJS}>VDF+%7Z6eOfJkInj3JPvAo~RTk+u#_@%g=u;`Ci|?NUofr*q?=b 
zK1fyTfdipUfIr@?h-Y8@3+kncp;S{2&qHTRzrmqRv5y`J0+m81%wI0(U$A~av6IP z=!PPT-UoU#?Yb^B^AwViH25e!{_?5OUSH9J{mnkb`h^l``8IJ$@R6WgGTR#}0@;e8 z7k3sTQl@PkhBqP!X&x$|lU7zc@fsl2z%sxeN9zmMr!VQUTm_*e8BZ%dw(35T;;row z5fM4dY}olQJzcnZzgU@)BC{^jc5lYhKfCU5+QZZ)r?3~q*Ae$I;tJqKz&!>n6m7M# z;P-(>!EZgiehhYsOrJ@8l!sau;?rE(qC{QOG|=A*@DMLLAA1w#{1QC+nVAh@MJ$$cgEG#cSMq zc~$fYTDO2Yi)TR`Q>>qLnIDB~_apj};5Ws_%-Mac$Mhw}jge-fDCQtE18S~*qI;Vw z1T&;-b~HOfF#VSBIIGjAH&qni=t72V0kENZ?6WA7kr8rTVQEUwq+9pz?;rY=O67&E z0S0jYd*@ad*F?p&lf9E%@&sXPg70{R%($uEK>p^T-qi7Fq{Q;r*T2oqu` z4)dC_`mVN7R|Q}3zp$hZ$N;|mq@^XP2PfsWOv$-@#X634&FYYH?a=La`(8_BH!cUO zg%-LHlDn$`0ZT`JW8i|o_VKKYWqbbN3|%p0lm={oE8aQb+3)N{HDM1h|p_uFd)D5M;`%s;XL!6eMKHkUrWDZaq9S z7_Vosa)5TC`yc`|EnQtNjK8Z#5O7Hm9SA#8G>~BN6qn;6AvTzTNtoD=&fsdn1clm$ z{ep*~URM0~+F!uA$SZDy`7n}yd=C~p$x<(i3|Y~b|9hiBoQ`qR%OKR2Z!J7`x6NkbPoO}#_^(f3 zUL0qN0G-3Cdw`Z8kXH|zftRQ+|VJM<9==zLy-!Txxzcxxd!A07+9?DPC6jkyyNj*nQjM@&s) zx8H=v#gu$I#aAidE~Y0SXAEbhY2WrBIE$IuMCwV z2Kbh5bV)2x4t|Kr!eO?}c#R2JYd)4s1-RBDgpEYv$m}KEE6$@s7&)@X!lhnB6mczN zG^*Re=40tyv!t?@D*7feuD(+Wo_j;tzD;5iqc6pV9bg>Kd=h%1SIT-cZ5g(gYls7T zfK1mED7vI^TL#dwKR|%gcJ*`{m*7j9Us;bKF?=1i9%F+mkhqzPe8*w|6H5#Y`wF8D zZW&W*qs#5K;$D_l%=oUY_9sa0;Op0CqJDw`5xK*{GiJXbYJ?h5MEm=rmb;gzn&NZkF}i(OJ@*@vF_PAg@H2=d-ZbO$`1h|p3tWW; z$z0pYNR|8#Ugb`JOUB zpNkz}`xW&_JsJrSYikpFXYYA^%h-hY?D%n^vl4vvtv2!`@EQ9;+8mFGUR3Z9Q1$=U zVaMrRqYH}|QBjj%_sMv7tLyxQ{|A_3nDJ>^fqJU4{}T)Pr{+x{A3XNN5S{Mtfq^9- zKfW-e;{+I)P&s*beC!_>m^lln(tz^wabJnp&)1iy#YSGt8N-MOHH6_C$oHOxfDq`r zX11c{gVImT^i}8u zNr-}U1i4>30kqhS#q>0{{(RE8=&+xUz+^{ia#P>K@h4UXT?z^bnbnR#JGltaJO%zA zx&Qh}FKJ|V{ydin56*LnYh|TA z;<)xkFeu5;+U4+B7Xkk$z2FTk)lN?ImE*7VydY_k6A{2K}gC@P4s=zF)16U)TSmK$#H|{@?2j#=q7Z^)uZi=#=l#JV1Py z5KQAyPd7*OS|P+v$bm5pfCA$@5P|vPLdgh5fclO1 z+;nJ^W0*aFXsdciHAzjeYnNAmcsW~<>%kb(jy`?);)ybpLfIhX*S&Ch#0bcWD&+|f z&r-ANcplpE@ep>Hh~e`fL4r7g1K3D5BbKKfAXPvp65Q4Nee8@@6bB?e808r7A~&RH z-B{qOgGYmuCIEG)??^Ndvd-cH&Ub&>4)qh}y1zTg#tDMgAQaB z?$9SN$BKym8EPp3V3N#!hw?Va~AwaXOG*;p=Wv zQu>j_?%sk4jf1-`iddbh{J9E|+)lIZK9g 
zZ`(!z>Tm{Rmw}hbw$(}p8%JBq14)Pjj0ziif2!zT5nntIal^f`DPa%a~KeJfsKXT{8+n|@9&rV3qX{j$Lc=T z^U@uNAkHI9koi?_5o{DNz#9!i28Y*jZg5ss7I8pB#Emzfq#gnGcN`X9u|f>P?SQFx zVq7|s2Z6N}g3LxEx?E9lF`8dEci=cm3qZ~Zb$ZAnp)8(FD36Gy7~@c;B7Ap;np#^& z@j(|lI5=W2MKe-Vrliemw~;65;HWV#V$|DF=l(-(L#x-U!GzUm5)u+*yb*#!DXtUj z;ia7Ozzx?6a<+x8X87xI_LL=|eM4Fe-rK7K5M!G0ID<8~mwvxL*XP(jJT z`p=w(mH>DgKtaNK9BOD36g6HOE^5znp2KV7B6zQhdv9p(n571I2^UsXoiTbcP+&XZ z^gv{myUkvIdfd>{k&x!00HH|jI!*2VWwMm+v$% z5JqFHanxbyAr8PX7&ui$UyjG=#~Cf=5Hg4a(vk^pc`oR#FwA}_lH4u=s%UIa zVyGD!iXg!5SkRFD7dsi_#&hF1YX!ks{cHb@vpyc>&f=^KquMuPfa2_FwOwWhF?Qk- zu*6$HjX|NHELOR`@d`a<2smQfP{4I%2N%Ztc!my5yRqn9mo6T7w9fyp(bEstP#b>v z^eGpf!xagvLi*?~=;#UzbMQ=sm5Z6yZ9`4w)PtxALsg=VVg2M~LiiiS_GK1J#fAz4 z)7#1ql}QB|0dtsKlrloCVQ>ve9}qZ`z1taO1QG;@cZ`k0!!BLm0E0s<8BHF#RvN} z{>hgM)$DJJ@-%Kz=8X2{HDmQfr>8HHv#$MorKGTM*N`G8P#YVY&_isffk^UmE^16{ zpAC}f{&7P$u3vxq@uLrtNbBiOZ-$4X>cf~03W*d)uWA}%zAn2-xSbj?V)!3i`TW5X zxr)Pk=Ls{)ze4)szk&oH*3|gr=58}Gwi38dju>=Iu=RtSkE`#9ONraVgjASYEmk2Ru zyO$0vh7l1Io-eyQhiME^(C@=1G6LG@5nX07P;V?yQf7U9(9+ua1eI~Z`O(YIB+4M$ z<1QY3(aQ>o8|i(DeVU8K^1&hZ{KOeq5}`W5+PnEh(q`H~{aO2)y83kBS|pJf$|}Sh z5>*e^Ff8Ubc+?X{B}|U_3i|E!4G(t-V)cAlSBC}+lQMAqSx=hMv^(7RzO% z?t{{N@BSpcF_N`O42&ohd}Tg2Hw#;-$^<_YEgni$BWN{nrj+7?Ii<{eK8D^Hw*jXr znAGi=rOZcv)eh7p{Fx||6Z10tmmH-p1OBC>RdipyB;cnnXdhmmn z$+t%_!~>RUZX7N|X@{(sWw)6RvEH1Wogv22I(qc9Y#NpdLVUd07n{%>Y~zGnl6b#h z<}=r$Q3VHcR)ORK!i}|vJ^d5t(`}5zkr+BDbfM~-gJcT=X_iHJL_0&*7p*Kc}`dUP^YbbcxVI|Fx zZ8+8Dpj7`Ltc-q$uo;F=Ht8|@Wi~Q)`xS$lj~eG#uy?;Xl$JWeh>4koaSx_V#iwrd zdZpr69u$ovy8NOh2qw}hnbwzL3gx;+SM~o`Xr9I{umQ`;wuX<-2qKC{IIW_E5%(jE z+h{-3Ev9c^z#MLlY>yFzD+6Q8cOl~$d&wEpY>$g(#;6DJQvOZc&?&oi9YYgmKh!uR zzhF8eba$%1-Y8lq4EZ3cKjQ9$kT(j$Fd<^g%OPRtketDR#Dxbpo}UW`Szk}&x7g9| zy^WhhT-C>qM%Z`AL%eKYk03}wlI|y9dt6mVWEtMi3Kep^9w4Rp7zl*T zNQnW)=#9w-b1~#7#a7Qz+2pCLg*b`JA}MC5dHbgXC+#tRIPv-Pz(IZj-O z2VCf=xR1NkB=)_p<&a-{5ibJRQ4`D^R>=fefLXh<{nXq5ObR%EfGT?p zjK3sj6g;yry#HKPWEeRqMY111-br;H{w1R4nRl7yKZGKEhxt-QcBe*?3doNQpu4-2 
z`m&48aUv;BP)1f@LJ3+qD4|at9u6*$QfnJZedt_fu@Mz9F+KxsGKSBGtIAs>Qu}E` zqexx;M_@J>s^|k;hs-U2HyMy=eLzsL0^`Q@y0!RsT&P$?IIbsp3)RUmH5wd>5Nce2 ziVL|A9uJ==vJ*=f+KqwDgKG%mrD;K%J-`I$cl`M8O<%tJ32X_A;-_v!pl92-y3ztO zmL+pIY^UhrjpVug@>fmfCg#HrKv=!4rfV+GM2-QbOr0v{=oq~Jrn5x32k1OE+o6|q zPB8aWv}Bck?;dwt8XZrl?Va+nmdPD|dW?lhYXPt=0bL+11)QRgPx&O~FZOaG|U4!yP2M5iwHH+X_0?_6$B%1t?VLqR3YzF;%yTC-NJ6HB%#cpd^N2o{t z3Y{(%R_n8I`16OB%9jVSOBGw&jc6i1q@e@H({fu}^8VS)fr&U@S$V&Vz!}*#@*-r` zBE(1rBt_u+<*hysWF=3<&Q1=fHR0sIzeCjVM{KN``UfSY9A7HO?e#6lxOqKwVV?v(X#bJgZ( zGno$wO7~X*UY4vFT)RLVH`})HgO>#@JSlv?#O?nLO~vzDZ-K&WmY4qn5HFcIv2B~{ zarQqO47D}E6Dko1dfP=Nq+nZn?9 zVoSzYcp3k^b?eroCXf(-ba`A)3Sv%=MvQjKl`C_M40UxE5jy|jLmj=4fPh?-wC+vN z*``5}ZuBTS`zBOiDd^68f<}a_`{q`s!|JCX0)(;(#%7Pb)qpAfUM&7DA?>@B|EdK2C>AU>>A%W+ZIoQ~;rAgRqE_^Y zb)FxIa6n80K!Wo!$n2m}B&KC}lKanR*tQ3s@o}p2f0Q(M4(Z%srsiyjka9z)ES@FJsf$Krs{uk1cHZ}ydDaZYM5l02H zUlXkqA)L_>q+r)WECYpt$cu?m=fSs9XFA>eh8ti+SRg4BY;)1A{~Zmbpgs6sp`o=o z#{JfRt&q&M7WzN2q}G0Wn*SkRJ%d5hKbQn z>z^Yx=3h9}HUCBhmu*jAgFz)z=a9+(QJSBcebMBk|N0@MOnE!p#hNfnno0+(;4sju)l1+i2@UMHi#s3uK%nO=9eQ`X^7ceN-zE_l^ zclLZc7i3;r=!!otoFwa!`rFTZ@iQm-j;lO8d|9Ai?&uxm-KMv0P93eW<_~pxHG7hQ zNWzk(V(#77d;33l8}Z2aZWkBb=6;fu?aG$! 
z7nR6A(BvL5r$}nWfMetASMiC@_WCao%g$L6_#tW0J9f@PogvjIj{oC5)4X65$rn&z zvlt#7gS$oOuYuc~q12e^qzp;Z*PMyI&$IA`L=@QfW3; zD#L1^b}1@FnU{;n7su3F1k#m&Q&v;gDf5 zTN`)q{akb_wKgOF1(lAT}y8>oiS>f4xBm13^ZPA$U3WB$15lP@bbG* zTYH74RJw@h%a?ZPaeM*B#=`88QrE_&sdnDY9%Hv!vRdF^AfJdR$bUF*-tL&a=Ba!^ zGs?=GBn$Ki1(4!8)3AAR@H1zqVF20o({igm1zQOTDqGbxJNt{Mh;IOK_q0BZ{xbc_ zn&kOh(w$40hljCR5&L-ep`9;I2=k!>}EzQheOK4P02Q5IUY>QT@+nyx)7yhkH5w?tp|L8 z8QaFxJmS(l@~fjW40Nw&v&Sji>4JRgL-{()&Fo^5Ze9+d-Mzbe$zm>y+~qz`H!sc~ zO{@R#yy~|H_~muY{w)bI8O)K8GuJlBDSGJqRcO7XXJ|{mJLP$Bg~tlczSG_P{Tj~B zvKFr<9@;y|zBqchYUG+D>wqN2Wkq4C1BqdzEA%)`U)?fIcp{%S-wAhjeYT{B#~v?Z zHKY0SyBXh>sVtJTO%;32JR1_cLO)jHYrKISKkuFn_B3U8SdFa575Pe4@5`N?8G1)J zq7xHOR*JPD0(dQU!}ntUf(}}mKwvSMK_c1R-@N}s7tLAL7}i+Y_wT=3o?4Y4L#YT} zTGMlwU1#7zk7UgqUA9bKRaRg?HuqFg1Pne!vUM!8slQg>9d(zQGQZohKYe~v(IO#W z;Cg|r@Y=Pk^2H$aF^%l9K)kDBMMe&zOHqV6`c-wbh}AW%Yb@c`;;~HNMeO|QCv@5? zr%ky0M2u;yEv^lXRbi2`PcCgV`eSCP?CyygZ4v>?e{KGLVN+RoC;ZY3-2y&IpU(*8 zS5_Xhb1uH)zf8Vk^1?B09tWccyO|-=)Fn#`%&Vj_Fgv5}eNaZ`Spm@&cfC(V)ziLd zv@Iil*7;`J2ZHAEd6sEOXB|q^;xP&oA~QnjzAu^^7#7Y!D#2V^9e35N#)j8^4EeIr zvMUI`J7cNvlIP$!GuT@D$w$)CVyFwhTs!Iwv*Z9{oXw(;$Yj|!t5w8gRb9DYG**#| zsE7-i%jte*cV+J74;P*CFN~ZTa2H#n#YIv9Y4}7vJ9KUud{@xL?5Ilk|KtjThvZW~Ll;_B!#gELQB25g#G42Odrqvco5<_p^lw9KbPD07hL z>z6b=er&fzO=9WU@_Z~sb{D7YGc@+@W~D3b;#*&Yb@kxjalfNQ#f6V_qIOkM*cK|9 z@j?e<6#HuLFfVS|vg?ekK(@U36Q!+6tThd-VPd&CZQY$3JyDw@e%_}Flasa%-LnVo zXM3L&`m{&>Ok7JmkGrG3xPGPTTc&N9!0+Q@ZPPrGZJAAmxc&StpRLS?otCzmRr!7{ z?la+ux3PRPte9Nd;OK~4VnT(9>L82sAuKYmON}MG=&sDQgO5mk1A7}oC|7$NNuJ)+ zDPBh-BVoy9Hzc=7ik*MKXrUSPFaI`8@1L`sD!~!ye!Y=BotPx4EJR##n_&sqdVoUt zv@UoE3tLcqd_isShVcpYh1IF;+ULz^!r3f>6heFb0^+&>2x|MV|IaZae>5$^4eBYhXAvYKK!T_tyAbONbvW!e;Xa!%~?U7@=mO z?Dj^6zE0KWV)tm@Gt!mLQtabMjUcnfoUUlC!cSVzCD}eVd6#pg{_9FS=hyOW zh;pFwa4`m*2xX6rl)n0NN=(eGiAJ$M_xTHUdwS?ee^4#_s{`?6m)^gpa#r@9%E!=R zFoITQ8etm*(IU>+1Ajp6%$-BFWq4`)n`5ix%!PXjhV8M5iN)ydJv_V?(H&naQ@5qVzQtTYLFifHU$Nr|5Fa~z6Qq4v*b3=SaKQpkWDYUB zwYl!gJ&ic_`?h88`^o=$O=0(NP7+!PgzF2Ew^)fw+));WQYX6%n}r8!?|p@AEoOoX 
zVq9C&k2EfEFcdb1<|sUZu#3nJ2wzL<3elWWRyH`H1{4eWy}x%34A{g`XcF*1K={SL zb&W%Z{EhSAvq1#&4MC^=_Kh3u5YS>Tpwo(-5h(MRj;szL7h~WyJt2nS=AX_gvT-0l ztPl%^F-n;L{h<@#Ix~8wm4c8ZF?1X|I{UQ1V+_0#nmu}7fbUG&R(_~05A`M+V)Ca?-_Y;}f ze`R=<<-XWs#n8MJM%lRCW126O>e3K-)?@Rvvy2=5foPu-=`}e1uE8P1FQGCAsB*&W z4PU*X{Lf3bI`aR2dHMW5gljZy!(ntH*yRA6cp>KO>Y8=3=c&p>lLIl~9-Z0WFQPjI z+b}Z8w>pD4hY+6!y&cR|VlF2%?A+Xl%hY66PvytIj{kGUrg+NV8%&pc{Hg1Xj^0>Z ztWD+Yk$gb!ijegGBEmjed=@eqj=K+{ktUF6CK1s9d&wJ40lUJVR5PCk@b}T5AOlJS zePJkSnMQZ_6$E-a$fBPnSRLfq)qJ1=o@cjg2cX zy7&`xx3O0U*(?QCed{}=9XrC9-;`*Aci~vjK~!61V8pquvBwYjez)!DUm79y$&!&{oi3en=nx>YP&|O)>jMqSF z!LHtZAmoq1KNB!XIlmGxH~NO3;0WUYM~3D~YP0W&vQ?PuCuJ5p61|u;DBm1Cb=6?rZavQ}+CNMYUJwnNXt_v{Z9(0DG3zbbr zUL?8nr`013fcX{42tTCIgYz57MMY$7Huk>jRHh7$(*E1k7_quI@e{+WyyNUcrulw` z%9A1_2hiIkk>I1wtsMKH@$>nx;2&Zvcm4-sEb3WAjAh)a=f6ygdAr2Z^yA-jv8*=+ z%M`&GNaX8qiw+f&S%L&M5BAh^y5jK7aQg4Xw-+J~;c1C!xbKo#UFWImyj1h1TZMiE zUN&v_xb$tQP(JCa`9J=vU21e*GbqvTJIanyixi z|K^XB-|!Flld$90S9kJoahYI(G7#GZ2;tem3cA8S#{FR7AcpuNHaaq5ACx>B!IQ_2 zcaMcod3bn8)6`UQXu{#OWvteBP^QtVTLF3-o^IS1dmO+6{o(+Z34k3?h%#%U+cggy z$j1QykjE452d|(#M`5}5gGLKFL3gm_d=;koAII3EuV~L%T_TjDZ+pA`Lp+t!cV&cy zQQ|H#?&UXn94(ftWPGZwzKZ8~)J(W2JEvn$$D#SgI5aRK-cI24M<-1aFh+*V(6cU)wVSzP(HNF^tvW(KCRE8QHj>s{; z21;rL4fZUbz&ReX_)G6a;Lc%&n>(f0N07`rih z=Gn6Z2WngE6_K-YE9Z!=m%XkVXW|wwwLb6W-USKrzOlRfHSHVKmx*qeCFOg=a$(S` z-!G0|)jO2(G3}+R`}4|u+MVGdF zFV%QIk#cu~;@QaD2?u&hS6tb*`|g`#Vhht@RwPDQ1KolcPR`R$gG)CtJN)$7me$m> z%*4}f<6kGC(qNRmUmOO{ZWIC`$qWunr_*C!85b|Q_r#ADi^TVL^)hN)-R~p(2)uHi z9RJx4O5fz+n&v)EL=yzvc`h1zUO*Rkp^(rw$1B$$ru+K(3PREf-D({M6F|k8D{qiJ zEP;XxMAII$yjQPS@#f40)mzHE_n~ZW-$;pPL0E;q`wo)L@}m9=#l<&a?K?xjX2ILw3;H9AoJ(tEOfQMR!Ya^YLaQ&m-!& zR6~Di8cWZT&0jH1DV^+nKMdf0&DCEb;mYK z>ZbaAg13+jT6<&lj&>_Xwx}f9zzRsv3JP9=_KRb+OI`heYwx!dNwm0Xh0r4bQ z<$Oa1#Ve#))8rEi#Kio71lf4<<<~rjrS*M-0z{dKXadMU&Krzkq4FRF)`q0@z>)3h z0xJWkDDI_P7p#qqHp+7oK{;Fu0d!{7%|Tb@lLVOBC8-Q~*vJ~<4KjQs&WN$`)9Pc( zF(&xBfMft%Y`K-QrV{G4d$HUMb%sz7E#_Uh*RQ+J&Szl5uNS}BUF*D}`df`Qx&KI3 
z7^+c;&NW@|3Unx_Dk|1|OABPiv%ER5S-_X-Z+EsVf{FIWSE`*(>$P8h?v6xmB_Bis zc}(NzoLQu;as@^Vx~aM&xw-kryHZzoEZ(COdf3pAFG4Hkd@C+ws1KbI`##bfqunLd z1uip)H?OWP;N@+juxmqX?H}+rnE;UCEg zKvu0qVb;YyB4|z2u_uy95CvbY13Y;FYI=hIi$N>|GbDTrE=NKO2WtcdkL$pkj8r)4 zb?WKr?vANh=PZid3clgq-=UI3m^Urb%)plm>##(E45BmSJYt41++U5sjWC6S?u2e=1%f_vAn}SxoWK?l{tav6H8DaI*vJ7C#Y7?> zcve86E>CEJtBG(15U)&dkYMH**B$Wg?|YZ)Cy+5}P6qU8jrId|HEEVeS1CG~KP@)Chp zZUG6al;-1ZgdGfySRh5BtO$N)vj?zL86%g*x~tCF+7`nJc*nX@bwX^+RTbY4-03I7 zbC}V%ChCVH06E1bZfJP;idM`X@KQ$Zd5PjM(tjPs=J9(4ch>138*~j!13&=GP(yq| zEdJ1Sh|ufOZ#pyjdizF!Y>9X}p2|qfo1Q1>HpBrrHEXescSWFvFdujJScUDp1%lbu zuxju9i3uMb$R2F?+Aq-GxGZZ~t74^!7cLO21+%2CVo&fG&~eR+vEnp+_M#s**ey_L zzyoW$w$%7FaKuB1(GiSPk|!uUI75pUFW$Dv3Wy})jE$_!-jI4nfW*noM1^Tw7O1Sdw9dMP_8SN#j!S`-Ql1>i+%4v2G;hXc0qvapzBd4br~Tm|;p+nt zlF%**vLL}Kz`DAcjYRZS)bfPYva_?3pvw_H^Fvq8t&=24T>dWKJ?=`d3*fQDxedQf zWQ}r=JU}bq!U^x#|IK3;6W0md5f)Tp@Gkc&7HxB`(Rx^8x~43K+IGJT$ZbK0A!}f-6X9b+cLWJEAH=;>K*V z=xNMqc?mi|xP~xBY^^}UKq%<_Dm=o2aQB9fLCQS<&{4r`vD7=VQZK!tG92@42;?AT49#F9NrN4 z>S3$dPw@t0hxqzv`#C;zC@Bj8Hc)8-aC6Wv5)U`xiy3C@9^zeqP3$nXy%QIOfBv;P z)0md2c;(k4Wk8^_;&)oFqq_)?V3q2kTL1dsg~U~Q%OTh4>{X8i4;9CB&;RNI5H{Y`BYXuKZ&9nwHCFjo9xHCs3kL_N|n116GJm?UoTb5(T5w+F& z>deZ(Pj6$OkAL5wx8U;!lTq{2`H%MSmj*0+6F4$*tgrsD151)Z3rNx?7q;2Xyb~6l ztV~^|v_SW;=cCcC#;<`^=MK|-b6l4!3KV#}^tbWr9qW;ba!a3s&F3l}Xldc_yjdRZX2 zrzayUQXq;(zFrn#nWEh+$?|;B$|yQxFjF30BA{FOT*p*GS*ScZn!nF3thHtl=`g(} zvfcHFAUCKnb3W&Y?8ud1r`t!rOspU4$v_tNoUL_X+sTv7XQQ{#P)BvpjTlj+m~WX6XlZZ25OEE}ziN=x7mtcVPp zOPyX+vC-Hsi_AK*pG@;mP5AnC=U~6TAaJM`c=+28&JXLANV%-k#KZqK6my!-+g#Uk z*>*KlYdAk-#o7j7($qY96%GSqSnR&Qth}yF{+`YD;_2iU(W8}|E)HOj9N{g4L zX(j*854))u%MOu9=hO3t-jNt4$x3;&5A#wDJe_zQb$OrUidXaBZ>$Xn+mldQ*E23W z72etw3w7t1ZPOf5#*w2}udm!^MYo(OCfjvb-E$rS(jX1uUS8@P*5<#shx||R`fo|Xh)u8&Xjb*5}|ghnCwFdtntLH zGtbg!DI;mGem&8~@6%@sH+%XTTsKi?%sDNEQPuM~O@@T$czN^i?d2SEb4>GBONqdAtTH=OK$U4q5gE+Ml0%A zPt1ia;B>O-O4-G829zu{iQ{XtH>zNa`nB_oiql5EJ=Z@)XOc(exJ6#(#})Vj1E`mS 
zduDt7%0wWwDc9uU4nE&Mb~nHA3UhLXOZ{jpV+O}U@|H5N@!|L*mnbzXhN1{F)JM8(?yx3&k zyx!_CPW|5cl4h%-cYb`YEshTy$Y|`}phqI@C6iAZyt})}Wu$HsBO_D93!+)*HQp_? zYjiiPH@-A`Ws3`^B76m|GftB#Q%{;bDgr9yT;!T~=I8g@&eV(O^$)&q`%!pGWl>$v zW;!&dvoAP!g=Z_<)TMSbtlQa;Wqhod@|f0=%n7&r)ugLel@&!=vt~js?W1PqrMfF$ zPdss@udvYRyv0|LXS{4VIcQd9k zZbrmUO{TsbJn*_ywtzZi-`&()RrkqfB$6$*sjK3Ru<$ur$|qT$ljD7q){}~Fig^O# z^{-vPgI#Xp!J6i|ApR7rY^3Z$2%Ozo{}Uv*J%#@;qy8u-G(D0rNZk(yiwS}?w+u@ zdi-KLfq9)>I^LJz-m~yYZgWpwk!b;mlrBrreRa`T@zM9M`-wB8=Fs){%aG}zaf7h1 z!!t9jQS=GgKIWr6!t5CGqu%~>Q>N1`jkCq|bsxDb9TfdIIAltlYgq{;zqSW6F`9wf zGvF_mc7wiZG|2D6-|AL1uBYBI$#6)mt2ghi(RSWG*xX=XaN$yy7O_q=%c^rN-D;gR zx|U-=MYl~{#8>gcg>CW|q_hs+_fh0snt!=&V>iux(Oag;c`J2?^XqJkGTQ8MQ(k{t z^f)thu{C?ryTCE|#B$x(Z+bc{@zGhLJDi;}=8HYcOPQ1Kt*bHTz)@>b<5Z9G_FPX&|sK(sv z$j$jAZOOQ#8!1_zgQNtGPTgH0J}5U!{CW57h@~+bv&CIY7B40ivOQgu`*6W^lpzt> z!@D6Vs(hllz5`kGkdnVBy-Ew>EQ*vo8ORn?X$-|b<_ zMf`Xo45;=8V!Zbf+qAY=@esLFuOtF1T(|-mA2F5kLq0L7(0JjvNdDdWJ_8``|yR14-GxgTALU!7cE)*dd7E+ z)hp#vYG3v0Y?rw5@{F9b0v<^=;$fvF9~{5mQ@BNHI(aAQ`9l(pL%m99TO8|K_0dC9 zwSKU<)?s|NzfO$Z^cwOJkKdhzlRZ@T#!&=H(=5-aIdDgknVWG2+fclB63hBn#KS3T{+|oHy86x-c>mNP`m*6ddxut^E)rGpl6{T zQ+uMNpot|=GPRHRNCG_GwhkoH&rZqml`A_uifT5DlgB^mxb!}cnU3>t&A@Ob32Yz> zqXN|$>K_peDKv3z-fs!DLoED6ye=fa!=HtgEV+iZRObxfZ6M~qa3j2^0GFWyXu35V z6y5;vd&2EIIywlvB@!L48ylAqs%M^i_S0z&LifbW0u4h9gal|WIF-!!B2Z$T6(He2 z{1t}$4q=~`kM2E2Z@zq=7|p5x_EsL-AEHO7GxMy_Hcg-K+9FFupoDpaHYVL5vc3P= zyde=|H63?E*ssypr%beH{Asa-gob7$cBcf$8t^Y{>~*Wb8~3HKMndjwKEDvVcRxfN zfW)%Ya_)p&;E5P>-gNJV)8U0?Z*XA#ATxWkbM4)5#u2W@-d321nnY;TcNUA?AJ^v5 z>!8H5jnZg$S@jBehDZ8n2-22?D+x#k&BYun{=r_mEv(vSMif_jmSeXK``me=Rd_bl zkE+)}M9h$wc$0d*M$Z20Z^-0x67cYVfW6pjJ{A74XzXy<5qcBZbZp7G$F*#YlTt7{ zgUG!AWv+hmqzI0bX_|gNz4ZP2D^=&zaR?BPJU!vnc$`UeuyJtFQ^O5zYYvz90ex&n z)wu?9MgwYuiI6XPXb+K2!!F&h7-kI4X+3uA-AqY^(|Gi9}4xTp$RSF$YPVOX@26vE|9n&!0O-q-TD0 z*NNf2;7gyubTm34ZX#sw{n?5jcxB%iD8p!PqK5_}!n#I4$k7*-vu(+FASvcaLmjz5 z5FMW-m{TI5&IJHU*l2cdWW{}=Ws66o{xr7Hn+(VQ-p7)m#SSO#?Vqj=8jVRA7z?~h 
zuXlBKR4<*>QzpwIctngqNG_HrKx26bS-L2UN<%QKe({gi_$`0r;4>Scs=T$@s%dT> zRZd4>=biFkPLx|KnlRvWtZl_qztz&xMF}|-;Hs|&LGfZ{V9GS`c&Do#Ym`hNk8r`k zAqXLpa3%r1g$p{sC=&pITNFn7(vpc*7Cl`7a$)&6ZL~sJ0sZRF85g3sX3u_)t?~B~ z=UTOd_Vg1Q&9;U|KYX}zKkXmdXZ`K{ae&v6wjILE6+x0c$R;73=MV+35m9wb9dSov z>Lp~q&Kba73Qgo-;lAR7HN@nYT2Jq003gg3RV2D)J07^% zjmc)#1Wl$LUdFcQC^)6SL4pucepV*yH)1(&HdP8GtOj4a1k`q-uhU%NoOl$Y1#Cv$ zfk%7x;srrgM_xuAXktv8C`}MU7$}A#^96fASpBW3VM0dcX9@d#BKmzwqW%P#6kyoz z5;2D@B{vOIvK;qkYGid#y6XGJqluXX2%h8o%vR?@k|a%e?~OvUiA;xOvC-M>eSJ6J zGa!Ia)TvU69A?4++M2CdoP?=Jm=*mvHFYrpTs9#ibRoTt*5ho;^VBV>;uH&J{Z#UD zuQw(kv^8Cccl$JL?~|MRBU^l0m!u81HV;1d%sY@c?o!2J1z zNG2v@>10gT(~6Ls4&J<}zhfioU#&}j>tC(QQWW`^D?)0Nba#$f#&=zYz2zmebaaK$ z&GSx7Ol(ee1YO#11YsL7oM|-U!fI;Ufr%{!Q3(btuhp7&KON$Kj>gzdaLk}iS@-tO z*PkvHNn_PFuzu)D)%hpc^Ci{9xV<>wk2sJOHCytvq}&wtoH~jjaYowb+0ProF*b!A0~b0Y?D-<9tva zm`g-_5U)>w{D}E^4vi_IY12Q(jsSZ~3@=1#6q*h(Lbm+kSSpJjZV`eW?mxztS92Y!0p{vcIMv~AhyfU@c!)ysPVgyjnYjp^o4SLAH+Mfo?jfA3RI;F zqsL7=ebH@hi2pbM!VQGHc8prs2Rl|lv5J_;f3-oy^R5xkLxNs{=C&6`Mx&&T1u9xF6VsRx!^~~Z$goe;9aFZ!Ow>p__6Hz|P3r@8=SV3=q`T~{d0EWJlx4TL?3vGk)Zhs!2yXE%Yf|ItZY)0k1IO^>LxR>jwODdIJTXC=o9mv{$CV9KW@pbrG8ZWQ`tPM ztC{_89p(MYTgRvO8^Kni1{5hC9;pWDc8gTYwz0sQ_UYdB_@Fc&?m-xSTq9?X7$ru2 zgsk=jgX`5nI@{|fTvh*NL9^bUFMdSAVfo|11QfU#2P@0UQZMzJQ)7Sj8M3Yi%_Y`o ze0^MP)0_RXk`@$XE?Tq5kceQSu6XK3ZTDRR;98NIpU*%{_ROyq#ef~|^4>n-tS2b+ zcSLB_B3Z|Sq zITx~Q)A@%rJ@^KKy>I9>VGG3DKz*{lkb5Zk2;xcQhr~a@;)Y2DQ;|>Wo!8Rh z_uIP?>fMFjkLz&Tytqq7@$EAQE-4a8YL*}f?GFk#u}|G=X7;$b?<>p)UI4m8p-w)N z^7&iE4+*Zemhhm0AM@#zb+}RH-%ITTgNz{7$Gm{1(n(KcGCe3L=vQDp3XxGLSYaJ{ z8z`E;lr=$5RG2*wAlE|$Z1*WN6HNKM5gE!7!UB6kD<%lrJVC*eG>=?ANpg!Riyxb| zVAP{dv2KF5P?K}Cy$nFf>HFF0E_@r|{Xk9K1aod(E9gBk++Mp7>Id{ogOA$_uGe)v z@G#v1fN*{5Ka{5-!;amu{e5LjpTrb=>QW4qwSKkz22oi5rSE)d3QdAXm#{k$TPR{i z37m4P<55ZhfWkaGVf(S80KEaiQdME#Sm^SgVJfKxPErzL5*4o zt`w?0Y|~&T(vxsF9QE(wpo9a#bOUnmCUW=<4f+JM0_^UW@9i*PrBi4mfNJFK{vakM zrY7~lUG~3KS_O_o`EyeJj&+3O4LM;xrzvIHx`!(%UH|t|YL!vYToOU@T#69WW%~Cw 
zSakeQbCF0WUre@!A31!uEj0-ha~mqet>lWd>Z58TK4CeMXaAF(XMahl)&o1czvEMs zbm8F(p^}T6+iY^^3*I5B!K2mUn_lbvXI*s1nn3J9R7F(+TwMPM`Fnp~Nv>bwzn3?% zq{h7NwvB{}|Cju{B{b=8#l+(#dOrs3KxzbVS=5_fJRi0iyRYvA-xe0Go1y@H00B!-TMXmPFnDnm^VBt`NJF2@19v6XcYv*$0Rajqt2?Qd^$A~y;s7%~Yg|FP_JaW3 zj?Z=?-9}t!lRlS#fB>8n#C8CyW73I@VBEij^{juS>;D(6b(8&Bjv~bDLsc%l#KS#Q z*%_!5dyV~xv9UpDK!_-u@&RnJ6qd8^LM8|Q+eD?+m)%&<0r(j}nSBb825V|oN(+a={c?%k&*nlE9oH$3A4L_x} z??kN}#|(Qv{o;XI>4;M#eC^lJQcu`za`k@!?0gE| literal 0 HcmV?d00001 diff --git a/docs/multitenant/openssl_schema.jpg b/docs/multitenant/ords-based/openssl_schema.jpg similarity index 100% rename from docs/multitenant/openssl_schema.jpg rename to docs/multitenant/ords-based/openssl_schema.jpg diff --git a/docs/multitenant/provisioning/example_setup_using_oci_oke_cluster.md b/docs/multitenant/ords-based/provisioning/example_setup_using_oci_oke_cluster.md similarity index 100% rename from docs/multitenant/provisioning/example_setup_using_oci_oke_cluster.md rename to docs/multitenant/ords-based/provisioning/example_setup_using_oci_oke_cluster.md diff --git a/docs/multitenant/provisioning/multinamespace/cdb_create.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/cdb_create.yaml similarity index 58% rename from docs/multitenant/provisioning/multinamespace/cdb_create.yaml rename to docs/multitenant/ords-based/provisioning/multinamespace/cdb_create.yaml index d3b5e04f..8ace42e8 100644 --- a/docs/multitenant/provisioning/multinamespace/cdb_create.yaml +++ b/docs/multitenant/ords-based/provisioning/multinamespace/cdb_create.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: CDB metadata: name: cdb-dev @@ -11,28 +11,28 @@ spec: replicas: 1 sysAdminPwd: secret: - secretName: "cdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" ordsPwd: secret: - secretName: "cdb1-secret" - key: "ords_pwd" + secretName: "[...]" + key: "[...]" cdbAdminUser: secret: - secretName: "cdb1-secret" - key: 
"cdbadmin_user" + secretName: "[...]" + key: "[...]" cdbAdminPwd: secret: - secretName: "cdb1-secret" - key: "cdbadmin_pwd" + secretName: "[...]" + key: "[...]" webServerUser: secret: - secretName: "cdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "cdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" cdbTlsKey: secret: secretName: "db-tls" @@ -41,4 +41,8 @@ spec: secret: secretName: "db-tls" key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/provisioning/multinamespace/pdb_clone.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_clone.yaml similarity index 74% rename from docs/multitenant/provisioning/multinamespace/pdb_clone.yaml rename to docs/multitenant/ords-based/provisioning/multinamespace/pdb_clone.yaml index b88fb71b..4dac1aea 100644 --- a/docs/multitenant/provisioning/multinamespace/pdb_clone.yaml +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_clone.yaml @@ -2,7 +2,7 @@ # Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb2 @@ -21,12 +21,12 @@ spec: assertivePdbDeletion: true adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "[...]" + key: "[...]" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" pdbTlsKey: secret: secretName: "db-tls" @@ -41,10 +41,14 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" action: "Clone" diff --git a/docs/multitenant/provisioning/multinamespace/pdb_close.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_close.yaml similarity index 66% rename from docs/multitenant/provisioning/multinamespace/pdb_close.yaml rename to docs/multitenant/ords-based/provisioning/multinamespace/pdb_close.yaml index a823f5d9..44b1a086 100644 --- a/docs/multitenant/provisioning/multinamespace/pdb_close.yaml +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_close.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -12,12 +12,12 @@ spec: pdbName: "pdbdev" adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "[...]" + key: "[...]" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" pdbTlsKey: secret: secretName: "db-tls" @@ -32,12 +32,16 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: 
"privateKey" pdbState: "CLOSE" modifyOption: "IMMEDIATE" action: "Modify" diff --git a/docs/multitenant/provisioning/multinamespace/pdb_create.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_create.yaml similarity index 67% rename from docs/multitenant/provisioning/multinamespace/pdb_create.yaml rename to docs/multitenant/ords-based/provisioning/multinamespace/pdb_create.yaml index 200f3712..2bf2189b 100644 --- a/docs/multitenant/provisioning/multinamespace/pdb_create.yaml +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_create.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -12,12 +12,12 @@ spec: pdbName: "pdbdev" adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "[...]" + key: "[...]" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" pdbTlsKey: secret: secretName: "db-tls" @@ -32,12 +32,16 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" fileNameConversions: "NONE" tdeImport: false totalSize: "1G" diff --git a/docs/multitenant/provisioning/multinamespace/pdb_delete.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_delete.yaml similarity index 70% rename from docs/multitenant/provisioning/multinamespace/pdb_delete.yaml rename to docs/multitenant/ords-based/provisioning/multinamespace/pdb_delete.yaml index 282885b0..296c9feb 100644 --- a/docs/multitenant/provisioning/multinamespace/pdb_delete.yaml +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_delete.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB 
metadata: name: pdb1 @@ -25,10 +25,15 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + diff --git a/docs/multitenant/provisioning/multinamespace/pdb_open.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_open.yaml similarity index 66% rename from docs/multitenant/provisioning/multinamespace/pdb_open.yaml rename to docs/multitenant/ords-based/provisioning/multinamespace/pdb_open.yaml index 85fb2ce4..9f85f0b5 100644 --- a/docs/multitenant/provisioning/multinamespace/pdb_open.yaml +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_open.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -12,12 +12,12 @@ spec: pdbName: "pdbdev" adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "[...]" + key: "[...]" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" pdbTlsKey: secret: secretName: "db-tls" @@ -32,12 +32,16 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" action: "Modify" pdbState: "OPEN" modifyOption: "READ WRITE" diff --git a/docs/multitenant/provisioning/multinamespace/pdb_plug.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_plug.yaml similarity index 80% rename from docs/multitenant/provisioning/multinamespace/pdb_plug.yaml rename to docs/multitenant/ords-based/provisioning/multinamespace/pdb_plug.yaml index d9135f13..10719ccc 
100644 --- a/docs/multitenant/provisioning/multinamespace/pdb_plug.yaml +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_plug.yaml @@ -2,7 +2,7 @@ # Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -36,11 +36,16 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + diff --git a/docs/multitenant/provisioning/multinamespace/pdb_unplug.yaml b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_unplug.yaml similarity index 77% rename from docs/multitenant/provisioning/multinamespace/pdb_unplug.yaml rename to docs/multitenant/ords-based/provisioning/multinamespace/pdb_unplug.yaml index f3667dad..f30f2699 100644 --- a/docs/multitenant/provisioning/multinamespace/pdb_unplug.yaml +++ b/docs/multitenant/ords-based/provisioning/multinamespace/pdb_unplug.yaml @@ -2,7 +2,7 @@ # Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -30,10 +30,14 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/provisioning/ords_image.md b/docs/multitenant/ords-based/provisioning/ords_image.md new file mode 100644 index 00000000..e2d1dcef --- /dev/null +++ b/docs/multitenant/ords-based/provisioning/ords_image.md @@ -0,0 +1,81 @@ + + +# Build ORDS Docker Image + +This file contains the steps to create an ORDS based image to be used solely by the PDB life cycle multitentant controllers. + +**NOTE:** It is assumed that before this step, you have followed the [prerequisite](./../README.md#prerequsites-to-manage-pdb-life-cycle-using-oracle-db-operator-on-prem-database-controller) steps. + +#### Clone the software using git: + +> Under directory ./oracle-database-operator/ords you will find the [Dockerfile](../../../ords/Dockerfile) and [runOrdsSSL.sh](../../../ords/runOrdsSSL.sh) required to build the image. + +```sh + git clone git@orahub.oci.oraclecorp.com:rac-docker-dev/oracle-database-operator.git + cd oracle-database-operator/ords/ +``` + +#### Login to the registry: container-registry.oracle.com + +**NOTE:** To login to this registry, you will need to the URL https://container-registry.oracle.com , Sign in, then click on "Java" and then accept the agreement. + +```bash +docker login container-registry.oracle.com +``` + +#### Login to the your container registry + +Login to a repo where you want to push your docker image (if needed) to pull during deployment in your environment. 
+ +```bash +docker login +``` + +#### Build the image + +Build the docker image by using below command: + +```bash +docker build -t oracle/ords-dboper:latest . +``` +> If your are working behind a proxy mind to specify https_proxy and http_proxy during image creation + +Check the docker image details using: + +```bash +docker images +``` + +> OUTPUT EXAMPLE +```bash +REPOSITORY TAG IMAGE ID CREATED SIZE +oracle/ords-dboper latest fdb17aa242f8 4 hours ago 1.46GB + +``` + +#### Tag and push the image + +Tag and push the image to your image repository. + +NOTE: We have the repo as `phx.ocir.io//oracle/ords:latest`. Please change as per your environment. + +```bash +docker tag oracle/ords-dboper:ords-latest phx.ocir.io//oracle/ords:latest +docker push phx.ocir.io//oracle/ords:latest +``` + +#### In case of private image + +If you the image not be public then yuo need to create a secret containing the password of your image repository. +Create a Kubernetes Secret for your docker repository to pull the image during deployment using the below command: + +```bash +kubectl create secret generic container-registry-secret --from-file=.dockerconfigjson=./.docker/config.json --type=kubernetes.io/dockerconfigjson -n oracle-database-operator-system +``` + +Use the parameter `ordsImagePullSecret` to specify the container secrets in pod creation yaml file + +#### [Image createion example](../usecase01/logfiles/BuildImage.log) + + + diff --git a/docs/multitenant/provisioning/quickOKEcreation.md b/docs/multitenant/ords-based/provisioning/quickOKEcreation.md similarity index 100% rename from docs/multitenant/provisioning/quickOKEcreation.md rename to docs/multitenant/ords-based/provisioning/quickOKEcreation.md diff --git a/docs/multitenant/usecase01/cdb_create.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/cdb_create.yaml similarity index 50% rename from docs/multitenant/usecase01/cdb_create.yaml rename to 
docs/multitenant/ords-based/provisioning/singlenamespace/cdb_create.yaml index 01fc0a18..5e020de6 100644 --- a/docs/multitenant/usecase01/cdb_create.yaml +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/cdb_create.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: CDB metadata: name: cdb-dev @@ -11,34 +11,39 @@ spec: replicas: 1 sysAdminPwd: secret: - secretName: "cdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" ordsPwd: secret: - secretName: "cdb1-secret" - key: "ords_pwd" + secretName: "[...]" + key: "[...]" cdbAdminUser: secret: - secretName: "cdb1-secret" - key: "cdbadmin_user" + secretName: "[...]" + key: "[...]" cdbAdminPwd: secret: - secretName: "cdb1-secret" - key: "cdbadmin_pwd" + secretName: "[...]" + key: "[...]" webServerUser: secret: - secretName: "cdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "cdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" cdbTlsKey: secret: - secretName: "db-tls" - key: "tls.key" + secretName: "[...]" + key: "[...]" cdbTlsCrt: secret: - secretName: "db-tls" - key: "tls.crt" + secretName: "[...]" + key: "[...]" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + diff --git a/docs/multitenant/provisioning/singlenamespace/cdb_secret.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/cdb_secret.yaml similarity index 100% rename from docs/multitenant/provisioning/singlenamespace/cdb_secret.yaml rename to docs/multitenant/ords-based/provisioning/singlenamespace/cdb_secret.yaml diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_clone.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_clone.yaml similarity index 78% rename from docs/multitenant/provisioning/singlenamespace/pdb_clone.yaml rename to docs/multitenant/ords-based/provisioning/singlenamespace/pdb_clone.yaml index 0ecc3c70..964d1e5e 100644 --- 
a/docs/multitenant/provisioning/singlenamespace/pdb_clone.yaml +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_clone.yaml @@ -21,12 +21,12 @@ spec: assertivePdbDeletion: true adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "[...]" + key: "[...]" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" pdbTlsKey: secret: secretName: "db-tls" @@ -47,4 +47,14 @@ spec: secret: secretName: "pdb1-secret" key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" action: "Clone" diff --git a/docs/multitenant/usecase01/pdb_close.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_close.yaml similarity index 67% rename from docs/multitenant/usecase01/pdb_close.yaml rename to docs/multitenant/ords-based/provisioning/singlenamespace/pdb_close.yaml index 5917d33a..06d92469 100644 --- a/docs/multitenant/usecase01/pdb_close.yaml +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_close.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -12,12 +12,12 @@ spec: pdbName: "pdbdev" adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "[...]" + key: "[...]" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" pdbTlsKey: secret: secretName: "db-tls" @@ -32,12 +32,16 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" pdbState: "CLOSE" modifyOption: "IMMEDIATE" action: "Modify" diff --git 
a/docs/multitenant/usecase01/pdb_create.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_create.yaml similarity index 69% rename from docs/multitenant/usecase01/pdb_create.yaml rename to docs/multitenant/ords-based/provisioning/singlenamespace/pdb_create.yaml index be3581ad..2744223e 100644 --- a/docs/multitenant/usecase01/pdb_create.yaml +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_create.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -12,12 +12,12 @@ spec: pdbName: "pdbdev" adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "[...]" + key: "[...]" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" pdbTlsKey: secret: secretName: "db-tls" @@ -32,12 +32,16 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" fileNameConversions: "NONE" tdeImport: false totalSize: "1G" diff --git a/docs/multitenant/usecase01/pdb_delete.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_delete.yaml similarity index 72% rename from docs/multitenant/usecase01/pdb_delete.yaml rename to docs/multitenant/ords-based/provisioning/singlenamespace/pdb_delete.yaml index c22b546a..523ac1cb 100644 --- a/docs/multitenant/usecase01/pdb_delete.yaml +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_delete.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -25,10 +25,15 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: 
secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + diff --git a/docs/multitenant/usecase01/pdb_open.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_open.yaml similarity index 67% rename from docs/multitenant/usecase01/pdb_open.yaml rename to docs/multitenant/ords-based/provisioning/singlenamespace/pdb_open.yaml index 25fdccc4..866db3e4 100644 --- a/docs/multitenant/usecase01/pdb_open.yaml +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_open.yaml @@ -1,4 +1,4 @@ -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -12,12 +12,12 @@ spec: pdbName: "pdbdev" adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "[...]" + key: "[...]" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "[...]" + key: "[...]" pdbTlsKey: secret: secretName: "db-tls" @@ -32,12 +32,16 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "[...]" + key: "[...]" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" action: "Modify" pdbState: "OPEN" modifyOption: "READ WRITE" diff --git a/docs/multitenant/usecase02/pdb_plug.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_plug.yaml similarity index 81% rename from docs/multitenant/usecase02/pdb_plug.yaml rename to docs/multitenant/ords-based/provisioning/singlenamespace/pdb_plug.yaml index 77c00b9c..e6605276 100644 --- a/docs/multitenant/usecase02/pdb_plug.yaml +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_plug.yaml @@ -2,7 +2,7 @@ # Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. 
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -42,5 +42,14 @@ spec: secret: secretName: "pdb1-secret" key: "webserver_pwd" - + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_secret.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_secret.yaml similarity index 100% rename from docs/multitenant/provisioning/singlenamespace/pdb_secret.yaml rename to docs/multitenant/ords-based/provisioning/singlenamespace/pdb_secret.yaml diff --git a/docs/multitenant/usecase02/pdb_unplug.yaml b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_unplug.yaml similarity index 78% rename from docs/multitenant/usecase02/pdb_unplug.yaml rename to docs/multitenant/ords-based/provisioning/singlenamespace/pdb_unplug.yaml index 085d337e..4e404efe 100644 --- a/docs/multitenant/usecase02/pdb_unplug.yaml +++ b/docs/multitenant/ords-based/provisioning/singlenamespace/pdb_unplug.yaml @@ -2,7 +2,7 @@ # Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -36,4 +36,14 @@ spec: secret: secretName: "pdb1-secret" key: "webserver_pwd" + secretName: "[...]" + key: "[...]" + webServerPwd: + secret: + secretName: "[...]" + key: "[...]" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/README.md b/docs/multitenant/ords-based/usecase/README.md new file mode 100644 index 00000000..b6f5e590 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/README.md @@ -0,0 +1,112 @@ + + + +# Use case directory + +The use case directory contains the yaml files to test the multitenant controller functionalities: create ords pod and pdb operation *create / open / close / unplug / plug / delete / clone / map / parameter session* +In this example the cdb and pdbs resources are deployed in different namespaces + +## Makefile helper + +Customizing yaml files (tns alias / credential / namespaces name etc...) is a long procedure prone to human error. A simple [makefile](../usecase/makefile) is available to quickly and safely configure yaml files with your system environment information. Just edit the [parameter file](../usecase/parameters.txt) before proceeding. + +```text +[👉 CHECK PARAMETERS..................] +TNSALIAS...............:(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELA.... +ORDPWD.................:[Password for ORDS_PUBLIC_USER ] +SYSPWD.................:[SYS password] +WBUSER.................:[username for https authentication] +WBPASS.................:[password for https authentication] +PDBUSR.................:[pdb admin user] +PDBPWD.................:[pdb admin password] +CDBUSR.................:[cdb admin user e.g. 
C##DBAPI_CDB_ADMIN] +CDBPWD.................:[cdb admin password] +PDBNAMESPACE...........:[namespace for pdb] +CDBNAMESPACE...........:[namespace for cdb] +COMPANY................:oracle +APIVERSION.............:v4 ---> do not edit +``` + +⚠ **WARNING: The makefile is intended to speed up the usecase directory configuration only, it is not supported, the editing and configuration of yaml files for production systems is left up to the end user** + +### Prerequisites: + +- Make sure that **kubectl** is properly configured. +- Make sure that all requirements listed in the [operator installation page](../../../../docs/installation/OPERATOR_INSTALLATION_README.md) are implemented. (role binding, webcert, etc.) + +- Make sure that the administrative user on the container database is configured as documented. + +### Commands + +Review your configuration running ```make check```; if all the parameters are correct then you can proceed with yaml files and certificates generation + +By executing the command ```make operator``` you will have in your directory an operator yaml file with the WATCH LIST required to operate with multiple namespaces. +Note that the yaml file is not applied; you need to manually execute ```kubectl apply -f oracle-database-operator.yaml```. 
+ +```bash +make operator +``` +You can generate all the other yaml files for pdb life cycle management using ```make genyaml``` + +```bash +make genyaml +``` + +list of generated yaml files + +```text +-rw-r--r-- 1 mmalvezz g900 137142 Nov 13 09:35 oracle-database-operator.yaml +-rw-r--r-- 1 mmalvezz g900 321 Nov 13 10:27 create_cdb_secrets.yaml +-rw-r--r-- 1 mmalvezz g900 234 Nov 13 10:27 create_pdb_secrets.yaml +-rw-r--r-- 1 mmalvezz g900 381 Nov 13 10:27 pdbnamespace_binding.yaml +-rw-r--r-- 1 mmalvezz g900 381 Nov 13 10:27 cdbnamespace_binding.yaml +-rw-r--r-- 1 mmalvezz g900 1267 Nov 13 10:27 create_ords_pod.yaml +-rw-r--r-- 1 mmalvezz g900 935 Nov 13 10:27 create_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 935 Nov 13 10:27 create_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 842 Nov 13 10:27 open_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 842 Nov 13 10:27 open_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 845 Nov 13 10:27 open_pdb3_resource.yaml +-rw-r--r-- 1 mmalvezz g900 842 Nov 13 10:27 close_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 842 Nov 13 10:27 close_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 846 Nov 13 10:27 close_pdb3_resource.yaml +-rw-r--r-- 1 mmalvezz g900 927 Nov 13 10:27 clone_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 928 Nov 13 10:27 clone_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 802 Nov 13 10:27 delete_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 802 Nov 13 10:27 delete_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 824 Nov 13 10:27 unplug_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 992 Nov 13 10:27 plug_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 887 Nov 13 10:27 map_pdb1_resource.yaml +-rw-r--r-- 1 mmalvezz g900 887 Nov 13 10:27 map_pdb2_resource.yaml +-rw-r--r-- 1 mmalvezz g900 890 Nov 13 10:27 map_pdb3_resource.yaml +``` + +The command ```make secrets``` will configure database secret credentials and certificate secrets + +```bash +make secrets +``` + + + +The makefile includes other different 
targets that can be used to test the various pdb operations available. E.g. + +```makefile +run03.2: + @$(call msg,"clone pdb2-->pdb4") + $(KUBECTL) apply -f $(PDBCLONE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb4 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb2-->pdb4 completed") + $(KUBECTL) get pdb pdb3 -n $(PDBNAMESPACE) +``` +The target ```run03.2``` clones pdb2 into pdb4 and wait for ```$TEST_EXEC_TIMEOUT``` for the operation to complete. + +### Output executions:. + +```make secrets``` + +![image](../images/makesecrets_1_1.png) + + + +```make runall``` executes different pdb operations including the cdb controller creation + +![image](../images/makerunall.png) \ No newline at end of file diff --git a/docs/multitenant/ords-based/usecase/cdbnamespace_binding.yaml b/docs/multitenant/ords-based/usecase/cdbnamespace_binding.yaml new file mode 100644 index 00000000..5fd355f4 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/cdbnamespace_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: cdbnamespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/multitenant/ords-based/usecase/clone_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/clone_pdb1_resource.yaml new file mode 100644 index 00000000..5723f7c6 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/clone_pdb1_resource.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: 
"UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/clone_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/clone_pdb2_resource.yaml new file mode 100644 index 00000000..2b9fc70a --- /dev/null +++ b/docs/multitenant/ords-based/usecase/clone_pdb2_resource.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb4 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/close_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/close_pdb1_resource.yaml new file mode 100644 index 00000000..ae837ce0 --- 
/dev/null +++ b/docs/multitenant/ords-based/usecase/close_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/close_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/close_pdb2_resource.yaml new file mode 100644 index 00000000..1b5d1324 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/close_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: 
"prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/close_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase/close_pdb3_resource.yaml new file mode 100644 index 00000000..f4a32938 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/close_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/create_ords_pod.yaml b/docs/multitenant/ords-based/usecase/create_ords_pod.yaml new file mode 100644 index 00000000..ad196c9d --- /dev/null +++ b/docs/multitenant/ords-based/usecase/create_ords_pod.yaml @@ -0,0 +1,48 @@ +apiVersion: database.oracle.com/v4 +kind: CDB +metadata: + name: cdb-dev + namespace: cdbnamespace +spec: + cdbName: "DB12" + ordsImage: _your_container_registry/ords-dboper:latest + ordsImagePullPolicy: "Always" + dbTnsurl : "T H I S I S J U S T A N E X A M P L E 
(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + replicas: 1 + deletePdbCascade: true + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/create_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/create_pdb1_resource.yaml new file mode 100644 index 00000000..84e910e0 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/create_pdb1_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + 
secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/create_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/create_pdb2_resource.yaml new file mode 100644 index 00000000..0a71c7c3 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/create_pdb2_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/delete_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/delete_pdb1_resource.yaml new file mode 100644 index 00000000..3aba580c --- /dev/null +++ b/docs/multitenant/ords-based/usecase/delete_pdb1_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + 
adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/delete_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/delete_pdb2_resource.yaml new file mode 100644 index 00000000..59b50a64 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/delete_pdb2_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/makefile b/docs/multitenant/ords-based/usecase/makefile new file mode 100644 index 00000000..dc881598 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/makefile @@ -0,0 +1,915 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# __ __ _ __ _ _ +# | \/ | __ _| | _____ / _(_) | ___ +# | |\/| |/ _` | |/ / _ \ |_| | |/ _ \ +# | | | | (_| | < __/ _| | | __/ +# |_| |_|\__,_|_|\_\___|_| |_|_|\___| +# | | | | ___| |_ __ ___ _ __ +# | |_| |/ _ \ | '_ \ / _ \ '__| +# | _ | __/ | |_) | __/ | +# |_| |_|\___|_| .__/ \___|_| +# |_| +# +# WARNING: Using this makefile helps you to customize yaml +# files. Edit parameters.txt with your enviroment +# informartion and execute the following steps +# +# 1) make operator +# it configures the operator yaml files with the +# watch namelist required by the multitenant controllers +# +# 2) make genyaml +# It automatically creates all the yaml files based on the +# information available in the parameters file +# +# 3) make secrets +# It configure the required secrets necessary to operate +# with pdbs multitenant controllers +# +# 4) make runall01 +# Start a series of operation create open close delete and so on +# +# LIST OF GENERAED YAML FILE +# +# ----------------------------- ---------------------------------- +# oracle-database-operator.yaml : oracle database operator +# cdbnamespace_binding.yaml : role binding for cdbnamespace +# pdbnamespace_binding.yaml : role binding for pdbnamespace +# create_ords_pod.yaml : create rest server pod +# create_pdb1_resource.yaml : create first pluggable database +# create_pdb2_resource.yaml : create second pluggable database +# open_pdb1_resource.yaml : open first pluggable database +# open_pdb2_resource.yaml : open second pluggable database +# close_pdb1_resource.yaml : close first pluggable database +# close_pdb2_resource.yaml : close second pluggable database +# clone_pdb_resource.yaml : clone thrid pluggable database +# clone_pdb2_resource.yaml : clone 4th pluggable database +# delete_pdb1_resource.yaml : delete first pluggable database +# delete_pdb2_resource.yaml : delete sencond pluggable database +# delete_pdb3_resource.yaml : delete thrid pluggable database +# unplug_pdb1_resource.yaml : unplug first pluggable 
database +# plug_pdb1_resource.yaml : plug first pluggable database +# map_pdb1_resource.yaml : map the first pluggable database +# config_map.yam : pdb parameters array +# +DATE := `date "+%y%m%d%H%M%S"` +###################### +# PARAMETER SECTIONS # +###################### + +export PARAMETERS=parameters.txt +export TNSALIAS=$(shell cat $(PARAMETERS) |grep -v ^\#|grep TNSALIAS|cut -d : -f 2) +export ORDPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep ORDPWD|cut -d : -f 2) +export SYSPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep SYSPWD|cut -d : -f 2) +export WBUSER=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBUSER|cut -d : -f 2) +export WBPASS=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBPASS|cut -d : -f 2) +export PDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBUSR|cut -d : -f 2) +export PDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBPWD|cut -d : -f 2) +export CDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBUSR|cut -d : -f 2) +export CDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBPWD|cut -d : -f 2) +export PDBNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBNAMESPACE|cut -d : -f 2) +export CDBNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBNAMESPACE|cut -d : -f 2) +export ORDSIMG=$(shell cat $(PARAMETERS)|grep -v ^\#|grep ORDSIMG|cut -d : -f 2,3) +export COMPANY=$(shell cat $(PARAMETERS)|grep -v ^\#|grep COMPANY|cut -d : -f 2) +export APIVERSION=$(shell cat $(PARAMETERS)|grep -v ^\#|grep APIVERSION|cut -d : -f 2) +export OPRNAMESPACE=oracle-database-operator-system +export ORACLE_OPERATOR_YAML=../../../../oracle-database-operator.yaml +export TEST_EXEC_TIMEOUT=3m +export IMAGE=oracle/ords-dboper:latest +export ORDSIMGDIR=../../../../ords + +REST_SERVER=ords +SKEY=tls.key +SCRT=tls.crt +CART=ca.crt +PRVKEY=ca.key +PUBKEY=public.pem +COMPANY=oracle +RUNTIME=/usr/bin/podman + +################# +### FILE LIST ### +################# + +export ORDS_POD=create_ords_pod.yaml + +export CDB_SECRETS=create_cdb_secrets.yaml +export 
PDB_SECRETS=create_pdb_secrets.yaml + +export PDBCRE1=create_pdb1_resource.yaml +export PDBCRE2=create_pdb2_resource.yaml + +export PDBCLOSE1=close_pdb1_resource.yaml +export PDBCLOSE2=close_pdb2_resource.yaml +export PDBCLOSE3=close_pdb3_resource.yaml + +export PDBOPEN1=open_pdb1_resource.yaml +export PDBOPEN2=open_pdb2_resource.yaml +export PDBOPEN3=open_pdb3_resource.yaml + +export PDBCLONE1=clone_pdb1_resource.yaml +export PDBCLONE2=clone_pdb2_resource.yaml + +export PDBDELETE1=delete_pdb1_resource.yaml +export PDBDELETE2=delete_pdb2_resource.yaml +export PDBDELETE3=delete_pdb3_resource.yaml + +export PDBUNPLUG1=unplug_pdb1_resource.yaml +export PDBPLUG1=plug_pdb1_resource.yaml + +export PDBMAP1=map_pdb1_resource.yaml +export PDBMAP2=map_pdb2_resource.yaml +export PDBMAP3=map_pdb3_resource.yaml + +export PDBMAP1=map_pdb1_resource.yaml +export PDBMAP2=map_pdb2_resource.yaml +export PDBMAP3=map_pdb3_resource.yaml + + +##BINARIES +export KUBECTL=/usr/bin/kubectl +OPENSSL=/usr/bin/openssl +ECHO=/usr/bin/echo +RM=/usr/bin/rm +CP=/usr/bin/cp +TAR=/usr/bin/tar +MKDIR=/usr/bin/mkdir +SED=/usr/bin/sed + +define msg +@printf "\033[31;7m%s\033[0m\r" "......................................]" +@printf "\033[31;7m[\xF0\x9F\x91\x89 %s\033[0m\n" $(1) +endef + +check: + $(call msg,"CHECK PARAMETERS") + @printf "TNSALIAS...............:%.60s....\n" $(TNSALIAS) + @printf "ORDPWD.................:%s\n" $(ORDPWD) + @printf "SYSPWD.................:%s\n" $(SYSPWD) + @printf "WBUSER.................:%s\n" $(WBUSER) + @printf "WBPASS.................:%s\n" $(WBPASS) + @printf "PDBUSR.................:%s\n" $(PDBUSR) + @printf "PDBPWD.................:%s\n" $(PDBPWD) + @printf "CDBUSR.................:%s\n" $(CDBUSR) + @printf "CDBPWD.................:%s\n" $(CDBPWD) + @printf "PDBNAMESPACE...........:%s\n" $(PDBNAMESPACE) + @printf "CDBNAMESPACE...........:%s\n" $(CDBNAMESPACE) + @printf "COMPANY................:%s\n" $(COMPANY) + @printf "APIVERSION.............:%s\n" $(APIVERSION) + 
+ +tlscrt: + $(call msg,"TLS GENERATION") + #$(OPENSSL) genrsa -out $(PRVKEY) 2048 + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > $(PRVKEY) + $(OPENSSL) req -new -x509 -days 365 -key $(PRVKEY) \ + -subj "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=$(COMPANY) Root CA" -out ca.crt + $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj \ + "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=cdb-dev-$(REST_SERVER).$(CDBNAMESPACE)" -out server.csr + $(ECHO) "subjectAltName=DNS:cdb-dev-$(REST_SERVER).$(CDBNAMESPACE)" > extfile.txt + $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey $(PRVKEY) -CAcreateserial -out $(SCRT) + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + +tlssec: + $(call msg,"GENERATE TLS SECRET") + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(CDBNAMESPACE) + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(PDBNAMESPACE) + + +delsec: + $(call msg,"CLEAN OLD SECRETS") + $(eval SECRETSP:=$(shell kubectl get secrets -n $(PDBNAMESPACE) -o custom-columns=":metadata.name" --no-headers) ) + $(eval SECRETSL:=$(shell kubectl get secrets -n $(CDBNAMESPACE) -o custom-columns=":metadata.name" --no-headers) ) + @[ "${SECRETSP}" ] && ( \ + printf "Deleteing secrets in namespace -n $(PDBNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret $(SECRETSP) -n $(PDBNAMESPACE))\ + || ( echo "No screts in namespace $(PDBNAMESPACE)") + @[ "${SECRETSL}" ] && ( \ + printf "Deleteing secrets in namespace -n $(CDBNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret $(SECRETSL) -n $(CDBNAMESPACE))\ + || ( echo "No screts in namespace $(PDBNAMESPACE)") + + +###### ENCRYPTED SECRETS ###### +export PRVKEY=ca.key +export PUBKEY=public.pem +WBUSERFILE=wbuser.txt +WBPASSFILE=wbpass.txt 
+CDBUSRFILE=cdbusr.txt +CDBPWDFILE=cdbpwd.txt +SYSPWDFILE=syspwd.txt +ORDPWDFILE=ordpwd.txt +PDBUSRFILE=pdbusr.txt +PDBPWDFILE=pdbpwd.txt + + + +secrets: delsec tlscrt tlssec + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + $(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey="$(PRVKEY)" -n $(PDBNAMESPACE) + @$(ECHO) $(WBUSER) > $(WBUSERFILE) + @$(ECHO) $(WBPASS) > $(WBPASSFILE) + @$(ECHO) $(CDBPWD) > $(CDBPWDFILE) + @$(ECHO) $(CDBUSR) > $(CDBUSRFILE) + @$(ECHO) $(SYSPWD) > $(SYSPWDFILE) + @$(ECHO) $(ORDPWD) > $(ORDPWDFILE) + @$(ECHO) $(PDBUSR) > $(PDBUSRFILE) + @$(ECHO) $(PDBPWD) > $(PDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBUSERFILE) |base64 > e_$(WBUSERFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBPASSFILE) |base64 > e_$(WBPASSFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(CDBPWDFILE) |base64 > e_$(CDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(CDBUSRFILE) |base64 > e_$(CDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SYSPWDFILE) |base64 > e_$(SYSPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(ORDPWDFILE) |base64 > e_$(ORDPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBUSRFILE) |base64 > e_$(PDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBPWDFILE) |base64 > e_$(PDBPWDFILE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic cdbpwd 
--from-file=e_$(CDBPWDFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic cdbusr --from-file=e_$(CDBUSRFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic syspwd --from-file=e_$(SYSPWDFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic ordpwd --from-file=e_$(ORDPWDFILE) -n $(CDBNAMESPACE) + $(KUBECTL) create secret generic pdbusr --from-file=e_$(PDBUSRFILE) -n $(PDBNAMESPACE) + $(KUBECTL) create secret generic pdbpwd --from-file=e_$(PDBPWDFILE) -n $(PDBNAMESPACE) + $(RM) $(WBUSERFILE) $(WBPASSFILE) $(CDBPWDFILE) $(CDBUSRFILE) $(SYSPWDFILE) $(ORDPWDFILE) $(PDBUSRFILE) $(PDBPWDFILE) + $(RM) e_$(WBUSERFILE) e_$(WBPASSFILE) e_$(CDBPWDFILE) e_$(CDBUSRFILE) e_$(SYSPWDFILE) e_$(ORDPWDFILE) e_$(PDBUSRFILE) e_$(PDBPWDFILE) + + +### YAML FILE SECTION ### +operator: + $(CP) ${ORACLE_OPERATOR_YAML} . + ${CP} `basename ${ORACLE_OPERATOR_YAML}` `basename ${ORACLE_OPERATOR_YAML}`.ORG + $(SED) -i 's/value: ""/value: $(OPRNAMESPACE),$(PDBNAMESPACE),$(CDBNAMESPACE)/g' `basename ${ORACLE_OPERATOR_YAML}` + + +define _script00 +cat < authsection01.yaml + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + +cat< authsection02.yaml + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + 
secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + + +cat < ${PDBNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding1 + namespace: ${PDBNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +cat < ${CDBNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: ${CDBNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +endef +export script00 = $(value _script00) +secyaml: + @ eval "$$script00" + +#echo ords pod creation +define _script01 +cat < ${ORDS_POD} +apiVersion: database.oracle.com/${APIVERSION} +kind: CDB +metadata: + name: cdb-dev + namespace: ${CDBNAMESPACE} +spec: + cdbName: "DB12" + ordsImage: ${ORDSIMG} + ordsImagePullPolicy: "Always" + dbTnsurl : ${TNSALIAS} + replicas: 1 + deletePdbCascade: true +EOF + +cat authsection01.yaml >> ${ORDS_POD} + +endef +export script01 = $(value _script01) + + +define _script02 + +cat <${PDBCRE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + 
tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat < ${PDBCRE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat <${PDBOPEN1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBOPEN2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBOPEN3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBCLOSE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${CDBNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${PDBCLOSE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${PDBNAMESPACE} + labels: + cdb: cdb-dev +spec: + 
 cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ cdbName: "DB12"
+ pdbName: "pdbprd"
+ pdbState: "CLOSE"
+ modifyOption: "IMMEDIATE"
+ action: "Modify"
+EOF
+
+cat <${PDBCLOSE3}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb3
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ cdbName: "DB12"
+ pdbName: "new_clone"
+ pdbState: "CLOSE"
+ modifyOption: "IMMEDIATE"
+ action: "Modify"
+EOF
+
+cat < ${PDBCLONE1}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb3
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ cdbName: "DB12"
+ pdbName: "new_clone"
+ srcPdbName: "pdbdev"
+ fileNameConversions: "NONE"
+ totalSize: "UNLIMITED"
+ tempSize: "UNLIMITED"
+ assertivePdbDeletion: true
+ action: "Clone"
+EOF
+
+cat < ${PDBCLONE2}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb4
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ cdbName: "DB12"
+ pdbName: "new_clone2"
+ srcPdbName: "pdbprd"
+ fileNameConversions: "NONE"
+ totalSize: "UNLIMITED"
+ tempSize: "UNLIMITED"
+ assertivePdbDeletion: true
+ action: "Clone"
+EOF
+
+
+cat < ${PDBDELETE1}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb1
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ pdbName: "pdbdev"
+ action: "Delete"
+ dropAction: "INCLUDING"
+EOF
+
+cat < ${PDBDELETE2}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb2
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ pdbName: "pdbprd"
+ action: "Delete"
+ dropAction: "INCLUDING"
+EOF
+
+cat < ${PDBUNPLUG1}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb1
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ cdbName: "DB12"
+ pdbName: "pdbdev"
+ xmlFileName: "/tmp/pdb.xml"
+ action: "Unplug"
+EOF
+
+cat <${PDBPLUG1}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb1
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ cdbName: "DB12"
+ pdbName: "pdbdev"
+ xmlFileName: "/tmp/pdb.xml"
+ # NOTE(review): duplicate "action" key removed; "Plug" is set once below
+ fileNameConversions: "NONE"
+ sourceFileNameConversions: "NONE"
+ copyAction: "MOVE"
+ totalSize: "1G"
+ tempSize: "100M"
+ assertivePdbDeletion: true
+ action: "Plug"
+EOF
+
+cat <${PDBMAP1}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb1
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ cdbName: "DB12"
+ pdbName: "pdbdev"
+ assertivePdbDeletion: true
+ fileNameConversions: "NONE"
+ totalSize: "1G"
+ tempSize: "100M"
+ action: "Map"
+EOF
+
+cat <${PDBMAP2}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb2
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ cdbName: "DB12"
+ pdbName: "pdbprd"
+ assertivePdbDeletion: true
+ fileNameConversions: "NONE"
+ totalSize: "1G"
+ tempSize: "100M"
+ action: "Map"
+EOF
+
+
+cat <${PDBMAP3}
+apiVersion: database.oracle.com/${APIVERSION}
+kind: PDB
+metadata:
+ name: pdb3
+ namespace: ${PDBNAMESPACE}
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "${CDBNAMESPACE}"
+ cdbName: "DB12"
+ pdbName: "new_clone"
+ assertivePdbDeletion: true
+ fileNameConversions: "NONE"
+ totalSize: "1G"
+ tempSize: "100M"
+ action: "Map"
+EOF
+
+
+## Auth information
+for _file in ${PDBCRE1} ${PDBCRE2} ${PDBOPEN1} ${PDBOPEN2} ${PDBOPEN3} ${PDBCLOSE1} ${PDBCLOSE2} ${PDBCLOSE3} 
${PDBCLONE1} ${PDBCLONE2} ${PDBDELETE1} ${PDBDELETE2} ${PDBUNPLUG1} ${PDBPLUG1} ${PDBMAP1} ${PDBMAP2} ${PDBMAP3} +do +ls -ltr ${_file} + cat authsection02.yaml >> ${_file} +done +rm authsection02.yaml +rm authsection01.yaml +endef + +export script02 = $(value _script02) + +genyaml: secyaml + @ eval "$$script01" + @ eval "$$script02" + +cleanyaml: + - $(RM) $(PDBMAP3) $(PDBMAP2) $(PDBMAP1) $(PDBPLUG1) $(PDBUNPLUG1) $(PDBDELETE2) $(PDBDELETE1) $(PDBCLONE2) $(PDBCLONE1) $(PDBCLOSE3) $(PDBCLOSE2) $(PDBCLOSE1) $(PDBOPEN3) $(PDBOPEN2) $(PDBOPEN1) $(PDBCRE2) $(PDBCRE1) $(ORDS_POD) $(CDB_SECRETS) $(PDB_SECRETS) + - $(RM) ${PDBNAMESPACE}_binding.yaml ${CDBNAMESPACE}_binding.yaml + + +cleancrt: + - $(RM) $(SKEY) $(SCRT) $(CART) $(PRVKEY) $(PUBKEY) server.csr extfile.txt ca.srl + + +################# +### PACKAGING ### +################# + +pkg: + - $(RM) -rf /tmp/pkgtestplan + $(MKDIR) /tmp/pkgtestplan + $(CP) -R * /tmp/pkgtestplan + $(CP) ../../../../oracle-database-operator.yaml /tmp/pkgtestplan/ + $(TAR) -C /tmp -cvf ~/pkgtestplan_$(DATE).tar pkgtestplan + +################ +### diag ### +################ + +login: + $(KUBECTL) exec `$(KUBECTL) get pods -n $(CDBNAMESPACE)|grep ords|cut -d ' ' -f 1` -n $(CDBNAMESPACE) -it -- /bin/bash + + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + + +dump: + @$(eval TMPSP := $(shell date 
"+%y%m%d%H%M%S" )) + @$(eval DIAGFILE := ./opdmp.$(TMPSP)) + @>$(DIAGFILE) + @echo "OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + +####################################################### +#### TEST SECTION #### +####################################################### + +run00: + @$(call msg,"cdb pod creation") + - $(KUBECTL) delete cdb cdb-dev -n $(CDBNAMESPACE) + $(KUBECTL) apply -f $(ORDS_POD) + time $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" cdb cdb-dev -n $(CDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"cdb pod completed") + $(KUBECTL) get cdb -n $(CDBNAMESPACE) + $(KUBECTL) get pod -n $(CDBNAMESPACE) + +run01.1: + @$(call msg,"pdb pdb1 creation") + $(KUBECTL) apply -f $(PDBCRE1) + time $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 creation completed") + $(KUBECTL) get pdb pdb1 -n $(PDBNAMESPACE) + +run01.2: + @$(call msg, "pdb pdb2 creation") + $(KUBECTL) apply -f $(PDBCRE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb2 creation completed") + $(KUBECTL) get pdb pdb2 -n $(PDBNAMESPACE) + +run02.1: + @$(call msg, "pdb pdb1 open") + $(KUBECTL) apply -f $(PDBOPEN1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 open completed") + 
$(KUBECTL) get pdb pdb1 -n $(PDBNAMESPACE) + +run02.2: + @$(call msg,"pdb pdb2 open") + $(KUBECTL) apply -f $(PDBOPEN2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" pdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb2 open completed") + $(KUBECTL) get pdb pdb2 -n $(PDBNAMESPACE) + + +run03.1: + @$(call msg,"clone pdb1-->pdb3") + $(KUBECTL) apply -f $(PDBCLONE1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb3 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb1-->pdb3 completed") + $(KUBECTL) get pdb pdb3 -n $(PDBNAMESPACE) + + +run03.2: + @$(call msg,"clone pdb2-->pdb4") + $(KUBECTL) apply -f $(PDBCLONE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb4 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb2-->pdb4 completed") + $(KUBECTL) get pdb pdb3 -n $(PDBNAMESPACE) + + +run04.1: + @$(call msg,"pdb pdb1 close") + $(KUBECTL) apply -f $(PDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 close completed") + $(KUBECTL) get pdb pdb1 -n $(PDBNAMESPACE) + +run04.2: + @$(call msg,"pdb pdb2 close") + $(KUBECTL) apply -f $(PDBCLOSE2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb2 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb2 close completed") + $(KUBECTL) get pdb pdb2 -n $(PDBNAMESPACE) + +run05.1: + @$(call msg,"pdb pdb1 unplug") + $(KUBECTL) apply -f $(PDBUNPLUG1) + $(KUBECTL) wait --for=delete pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb1 unplug completed") + +run06.1: + @$(call msg, "pdb pdb1 plug") + $(KUBECTL) apply -f $(PDBPLUG1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 plug completed") + $(KUBECTL) get pdb pdb1 -n $(PDBNAMESPACE) + 
+run07.1:
+ @$(call msg,"pdb pdb1 delete ")
+ - $(KUBECTL) apply -f $(PDBCLOSE1)
+ $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT)
+ $(KUBECTL) apply -f $(PDBDELETE1)
+ $(KUBECTL) wait --for=delete pdb pdb1 -n $(PDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT)
+ @$(call msg,"pdb pdb1 delete")
+ $(KUBECTL) get pdb -n $(PDBNAMESPACE)
+
+run99.1:
+ $(KUBECTL) delete cdb cdb-dev -n $(CDBNAMESPACE)
+ $(KUBECTL) wait --for=delete cdb cdb-dev -n $(CDBNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT)
+ $(KUBECTL) get cdb -n $(CDBNAMESPACE)
+ $(KUBECTL) get pdb -n $(PDBNAMESPACE)
+
+
+## SEQ | ACTION
+## ----+----------------
+## 00 | create ords pod
+## 01 | create pdb
+## 02 | open pdb
+## 03 | clone pdb
+## 04 | close pdb
+## 05 | unplug pdb
+## 06 | plug pdb
+## 07 | delete pdb (declarative)
+
+
+runall01: run00 run01.1 run01.2 run03.1 run03.2 run04.1 run05.1 run06.1 run02.1 run07.1
+
+
+###### BUILD ORDS IMAGE ######
+
+createimage:
+ $(RUNTIME) build -t $(IMAGE) $(ORDSIMGDIR)
+
+createimageproxy:
+ $(RUNTIME) build -t $(IMAGE) $(ORDSIMGDIR) --build-arg https_proxy=$(HTTPS_PROXY) --build-arg http_proxy=$(HTTP_PROXY)
+
+tagimage:
+ @echo "TAG IMAGE"
+ $(RUNTIME) tag $(IMAGE) $(ORDSIMG)
+
+push:
+ $(RUNTIME) push $(ORDSIMG)
+
+
diff --git a/docs/multitenant/ords-based/usecase/map_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/map_pdb1_resource.yaml
new file mode 100644
index 00000000..b71b59d5
--- /dev/null
+++ b/docs/multitenant/ords-based/usecase/map_pdb1_resource.yaml
@@ -0,0 +1,49 @@
+apiVersion: database.oracle.com/v4
+kind: PDB
+metadata:
+ name: pdb1
+ namespace: pdbnamespace
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "cdbnamespace"
+ cdbName: "DB12"
+ pdbName: "pdbdev"
+ assertivePdbDeletion: true
+ fileNameConversions: "NONE"
+ totalSize: "1G"
+ tempSize: "100M"
+ action: "Map"
+ adminName:
+ secret:
+ secretName: "pdbusr"
+ key: "e_pdbusr.txt"
+ adminPwd:
+ secret:
+ 
secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/map_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/map_pdb2_resource.yaml new file mode 100644 index 00000000..75d056d0 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/map_pdb2_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/map_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase/map_pdb3_resource.yaml new file mode 100644 index 00000000..3523aa68 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/map_pdb3_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + 
cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/open_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/open_pdb1_resource.yaml new file mode 100644 index 00000000..93a1d43a --- /dev/null +++ b/docs/multitenant/ords-based/usecase/open_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/open_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase/open_pdb2_resource.yaml new file mode 100644 
index 00000000..deb27f9a --- /dev/null +++ b/docs/multitenant/ords-based/usecase/open_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/open_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase/open_pdb3_resource.yaml new file mode 100644 index 00000000..586f2f57 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/open_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + 
secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase/parameters.txt b/docs/multitenant/ords-based/usecase/parameters.txt new file mode 100644 index 00000000..64dc3759 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/parameters.txt @@ -0,0 +1,61 @@ + +######################## +## REST SERVER IMAGE ### +######################## + +ORDSIMG:_your_container_registry/ords-dboper:latest + +############################## +## TNS URL FOR CDB CREATION ## +############################## +TNSALIAS:"T H I S I S J U S T A N E X A M P L E (DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + +########################################### +## ORDS PUBLIC USER ## +########################################### +ORDPWD:change_me_please + +########################################### +## SYSPASSWORD ## +########################################### +SYSPWD:change_me_please + +####################### +## HTTPS CREDENTIAL ### +####################### + +WBUSER:change_me_please +WBPASS:change_me_please + +##################### +## PDB ADMIN USER ### +##################### + +PDBUSR:change_me_please +PDBPWD:change_me_please + +##################### +## CDB ADMIN USER ### +##################### + +CDBUSR:C##DBAPI_CDB_ADMIN +CDBPWD:change_me_please + +################### +### NAMESPACES #### +################### + +PDBNAMESPACE:pdbnamespace +CDBNAMESPACE:cdbnamespace + +#################### +### COMPANY NAME ### +#################### + +COMPANY:oracle + +#################### +### APIVERSION ### +#################### + +APIVERSION:v4 diff --git a/docs/multitenant/ords-based/usecase/pdbnamespace_binding.yaml 
b/docs/multitenant/ords-based/usecase/pdbnamespace_binding.yaml
new file mode 100644
index 00000000..5af79ed6
--- /dev/null
+++ b/docs/multitenant/ords-based/usecase/pdbnamespace_binding.yaml
@@ -0,0 +1,13 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: oracle-database-operator-oracle-database-operator-manager-rolebinding1
+ namespace: pdbnamespace
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: oracle-database-operator-manager-role
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: oracle-database-operator-system
diff --git a/docs/multitenant/ords-based/usecase/plug_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/plug_pdb1_resource.yaml
new file mode 100644
index 00000000..9eb5ed77
--- /dev/null
+++ b/docs/multitenant/ords-based/usecase/plug_pdb1_resource.yaml
@@ -0,0 +1,53 @@
+apiVersion: database.oracle.com/v4
+kind: PDB
+metadata:
+ name: pdb1
+ namespace: pdbnamespace
+ labels:
+ cdb: cdb-dev
+spec:
+ cdbResName: "cdb-dev"
+ cdbNamespace: "cdbnamespace"
+ cdbName: "DB12"
+ pdbName: "pdbdev"
+ xmlFileName: "/tmp/pdb.xml"
+ # NOTE(review): duplicate "action" key removed; "Plug" is set once below
+ fileNameConversions: "NONE"
+ sourceFileNameConversions: "NONE"
+ copyAction: "MOVE"
+ totalSize: "1G"
+ tempSize: "100M"
+ assertivePdbDeletion: true
+ action: "Plug"
+ adminName:
+ secret:
+ secretName: "pdbusr"
+ key: "e_pdbusr.txt"
+ adminPwd:
+ secret:
+ secretName: "pdbpwd"
+ key: "e_pdbpwd.txt"
+ pdbTlsKey:
+ secret:
+ secretName: "db-tls"
+ key: "tls.key"
+ pdbTlsCrt:
+ secret:
+ secretName: "db-tls"
+ key: "tls.crt"
+ pdbTlsCat:
+ secret:
+ secretName: "db-ca"
+ key: "ca.crt"
+ webServerUser:
+ secret:
+ secretName: "wbuser"
+ key: "e_wbuser.txt"
+ webServerPwd:
+ secret:
+ secretName: "wbpass"
+ key: "e_wbpass.txt"
+ pdbOrdsPrvKey:
+ secret:
+ secretName: "prvkey"
+ key: "privateKey"
diff --git a/docs/multitenant/ords-based/usecase/unplug_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase/unplug_pdb1_resource.yaml
new file 
mode 100644 index 00000000..0036d5f7 --- /dev/null +++ b/docs/multitenant/ords-based/usecase/unplug_pdb1_resource.yaml @@ -0,0 +1,46 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/usecase01/README.md b/docs/multitenant/ords-based/usecase01/README.md similarity index 85% rename from docs/multitenant/usecase01/README.md rename to docs/multitenant/ords-based/usecase01/README.md index 7352257e..0020541c 100644 --- a/docs/multitenant/usecase01/README.md +++ b/docs/multitenant/ords-based/usecase01/README.md @@ -50,7 +50,9 @@ The following table reports the parameters required to configure and use oracle | pdbTlsKey | | [standalone.https.cert.key][key] | | pdbTlsCrt | | [standalone.https.cert][cr] | | pdbTlsCat | | certificate authority | -| assertivePdbDeletion | boolean | [turn on imperative approach on crd deleteion][imperative] | +| cdbOrdsPrvKey | | private key (cdb crd) | +| pdbOrdsPrvKey | | private key (pdb crd) | +| assertivePdbDeletion | boolean | [turn on imperative approach on crd deleteion][imperative] | > A [makfile](./makefile) is available to sped up the command execution for the multitenant setup and test. 
See the comments in the header of file @@ -161,63 +163,6 @@ GRANT SYSDBA TO CONTAINER = ALL; GRANT CREATE SESSION TO CONTAINER = ALL; ``` ---- - -#### Create CDB secret - -+ Create secret for CDB connection - -```bash -kubectl apply -f cdb_secret.yaml -n oracle-database-operator-system - -``` -Exmaple: **cdb_secret.yaml** - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: cdb1-secret - namespace: oracle-database-operator-system -type: Opaque -data: - ords_pwd: "encoded value" - sysadmin_pwd: "encoded value" - cdbadmin_user: "encoded value" - cdbadmin_pwd: "encoded value" - webserver_user: "encoded value" - webserver_pwd: "encoded value" - -``` -Use **base64** command to encode/decode username and password in the secret file as shown in the following example - -- encode -```bash -echo "ThisIsMyPassword" |base64 -i -VGhpc0lzTXlQYXNzd29yZAo= -``` -- decode -```bash - echo "VGhpc0lzTXlQYXNzd29yZAo=" | base64 --decode -ThisIsMyPassword - -``` - - ->Note that we do not have to create webuser on the database. - -+ Check secret: - -```bash -kubectl get secret -n oracle-database-operator-system -NAME TYPE DATA AGE -cdb1-secret Opaque 6 7s <--- -container-registry-secret kubernetes.io/dockerconfigjson 1 2m17s -webhook-server-cert kubernetes.io/tls 3 4m55s -``` - ->**TIPS:** Use the following commands to analyze contents of an existing secret ```bash kubectl get secret -o yaml -n ``` ----- - #### Create Certificates + Create certificates: At this stage we need to create certificates on our local machine and upload into kubernetes cluster by creating new secrets. 
@@ -257,7 +202,7 @@ webhook-server-cert kubernetes.io/tls 3 4m55s ```bash -genrsa -out 2048 +openssl genrsa -out 2048 openssl req -new -x509 -days 365 -key -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=oracle Root CA" -out openssl req -newkey rsa:2048 -nodes -keyout -subj "/C=CN/ST=GD/L=SZ/O=oracle, Inc./CN=-ords" -out server.csr /usr/bin/echo "subjectAltName=DNS:-ords,DNS:www.example.com" > extfile.txt @@ -270,6 +215,9 @@ kubectl create secret generic db-ca --from-file= -n oracle-database-op [Example of execution:](./logfiles/openssl_execution.log) +#### CDB and PDB credentials +
+Refer to the [landing page](../README.md) to implement openssl encrypted secrets. ---- @@ -283,9 +231,9 @@ kubectl create secret generic db-ca --from-file= -n oracle-database-op + Create ords container ```bash -/usr/bin/kubectl apply -f cdb_create.yaml -n oracle-database-operator-system +/usr/bin/kubectl apply -f create_ords_pod.yaml -n oracle-database-operator-system ``` -Example: **cdb_create.yaml** +Example: **create_ords_pod.yaml** ```yaml apiVersion: database.oracle.com/v1alpha1 @@ -299,30 +247,30 @@ spec: ordsImagePullPolicy: "Always" dbTnsurl : "...Container tns alias....." 
replicas: 1 - sysAdminPwd: - secret: - secretName: "cdb1-secret" - key: "sysadmin_pwd" + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" ordsPwd: - secret: - secretName: "cdb1-secret" - key: "ords_pwd" - cdbAdminUser: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_user" - cdbAdminPwd: - secret: - secretName: "cdb1-secret" - key: "cdbadmin_pwd" - webServerUser: - secret: - secretName: "cdb1-secret" - key: "webserver_user" - webServerPwd: - secret: - secretName: "cdb1-secret" - key: "webserver_pwd" + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" cdbTlsKey: secret: secretName: "db-tls" @@ -331,6 +279,11 @@ spec: secret: secretName: "db-tls" key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" + ``` > **Note** if you are working in dataguard environment with multiple sites (AC/DR) specifying the host name (dbServer/dbPort/serviceName) may not be the suitable solution for this kind of configuration, use **dbTnsurl** instead. Specify the whole tns string which includes the hosts/scan list. @@ -351,7 +304,7 @@ spec: dbtnsurl:((DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(TRANS...... 
``` -[Example of cdb.yaml](./cdb_create.yaml) +[create_ords_pod.yaml example](./create_ords_pod.yaml) ---- @@ -417,51 +370,16 @@ NAME CDB NAME DB SERVER DB PORT REPLICAS STATUS MESSAG [Example of executions](./logfiles/ordsconfig.log) ----- - -#### Create PDB secret - - -```bash -/usr/bin/kubectl apply -f pdb.yaml -n oracle-database-operator-system -``` -Exmaple: **pdb_secret.yaml** - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: pdb1-secret - namespace: oracle-database-operator-system -type: Opaque -data: - sysadmin_user: "encoded value" - sysadmin_pwd: "encoded value" -``` - -+ Check secret creation - -```bash -kubectl get secret -n oracle-database-operator-system -NAME TYPE DATA AGE -cdb1-secret Opaque 6 79m -container-registry-secret kubernetes.io/dockerconfigjson 1 79m -db-ca Opaque 1 78m -db-tls kubernetes.io/tls 2 78m -pdb1-secret Opaque 2 79m <--- -webhook-server-cert kubernetes.io/tls 3 79m -``` ---- - #### Apply pdb yaml file to create pdb ```bash -/usr/bin/kubectl apply -f pdb.yaml -n oracle-database-operator-system +/usr/bin/kubectl apply -f create_pdb1_resource.yaml -n oracle-database-operator-system ``` -Example: **pdb_create.yaml** +Example: **create_pdb1_resource.yaml** ```yaml -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -470,17 +388,24 @@ metadata: cdb: cdb-dev spec: cdbResName: "cdb-dev" - cdbNamespace: "oracle-database-operator-system" + cdbNamespace: "cdbnamespace" cdbName: "DB12" pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "pdbusr" + key: "e_pdbusr.txt" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "pdbpwd" + key: "e_pdbpwd.txt" pdbTlsKey: secret: secretName: "db-tls" @@ -495,18 +420,16 @@ spec: key: "ca.crt" 
webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "wbuser" + key: "e_wbuser.txt" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" - fileNameConversions: "NONE" - tdeImport: false - totalSize: "1G" - tempSize: "100M" - action: "Create" - assertivePdbDeletion: true + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" ``` + Monitor the pdb creation status until message is success diff --git a/docs/multitenant/usecase01/ca.crt b/docs/multitenant/ords-based/usecase01/ca.crt similarity index 100% rename from docs/multitenant/usecase01/ca.crt rename to docs/multitenant/ords-based/usecase01/ca.crt diff --git a/docs/multitenant/usecase01/ca.key b/docs/multitenant/ords-based/usecase01/ca.key similarity index 100% rename from docs/multitenant/usecase01/ca.key rename to docs/multitenant/ords-based/usecase01/ca.key diff --git a/docs/multitenant/usecase01/ca.srl b/docs/multitenant/ords-based/usecase01/ca.srl similarity index 100% rename from docs/multitenant/usecase01/ca.srl rename to docs/multitenant/ords-based/usecase01/ca.srl diff --git a/docs/multitenant/provisioning/singlenamespace/cdb_create.yaml b/docs/multitenant/ords-based/usecase01/cdb_create.yaml similarity index 100% rename from docs/multitenant/provisioning/singlenamespace/cdb_create.yaml rename to docs/multitenant/ords-based/usecase01/cdb_create.yaml diff --git a/docs/multitenant/usecase01/cdb_secret.yaml b/docs/multitenant/ords-based/usecase01/cdb_secret.yaml similarity index 100% rename from docs/multitenant/usecase01/cdb_secret.yaml rename to docs/multitenant/ords-based/usecase01/cdb_secret.yaml diff --git a/docs/multitenant/usecase02/pdb_clone.yaml b/docs/multitenant/ords-based/usecase01/clone_pdb1_resource.yaml similarity index 57% rename from docs/multitenant/usecase02/pdb_clone.yaml rename to docs/multitenant/ords-based/usecase01/clone_pdb1_resource.yaml index 0ecc3c70..3cc2c3dd 100644 
--- a/docs/multitenant/usecase02/pdb_clone.yaml +++ b/docs/multitenant/ords-based/usecase01/clone_pdb1_resource.yaml @@ -1,11 +1,7 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: - name: pdb2 + name: pdb3 namespace: oracle-database-operator-system labels: cdb: cdb-dev @@ -13,20 +9,21 @@ spec: cdbResName: "cdb-dev" cdbNamespace: "oracle-database-operator-system" cdbName: "DB12" - pdbName: "pdb2_clone" + pdbName: "new_clone" srcPdbName: "pdbdev" fileNameConversions: "NONE" totalSize: "UNLIMITED" tempSize: "UNLIMITED" assertivePdbDeletion: true + action: "Clone" adminName: secret: - secretName: "pdb1-secret" - key: "sysadmin_user" + secretName: "pdbusr" + key: "e_pdbusr.txt" adminPwd: secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" + secretName: "pdbpwd" + key: "e_pdbpwd.txt" pdbTlsKey: secret: secretName: "db-tls" @@ -41,10 +38,13 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "wbuser" + key: "e_wbuser.txt" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" - action: "Clone" + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/clone_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/clone_pdb2_resource.yaml new file mode 100644 index 00000000..28a4eab6 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/clone_pdb2_resource.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb4 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "new_clone2" + 
srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/close_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/close_pdb1_resource.yaml new file mode 100644 index 00000000..a5c3cf59 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/close_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/close_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/close_pdb2_resource.yaml new file mode 100644 index 
00000000..7fa15111 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/close_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/close_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase01/close_pdb3_resource.yaml new file mode 100644 index 00000000..fa7cf009 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/close_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "new_clone" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + 
webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/create_ords_pod.yaml b/docs/multitenant/ords-based/usecase01/create_ords_pod.yaml new file mode 100644 index 00000000..e39c4c56 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/create_ords_pod.yaml @@ -0,0 +1,48 @@ +apiVersion: database.oracle.com/v4 +kind: CDB +metadata: + name: cdb-dev + namespace: oracle-database-operator-system +spec: + cdbName: "DB12" + ordsImage: _your_container_registry/ords-dboper:latest + ordsImagePullPolicy: "Always" + dbTnsurl : "T H I S I S J U S T A N E X A M P L E ....(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + replicas: 1 + deletePdbCascade: true + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/create_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/create_pdb1_resource.yaml new file mode 100644 index 00000000..044d466b --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/create_pdb1_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: 
database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/create_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/create_pdb2_resource.yaml new file mode 100644 index 00000000..eb36aaa2 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/create_pdb2_resource.yaml @@ -0,0 +1,51 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + 
secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/delete_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/delete_pdb1_resource.yaml new file mode 100644 index 00000000..b0816929 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/delete_pdb1_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/delete_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/delete_pdb2_resource.yaml new file mode 100644 index 00000000..d2ad95cc --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/delete_pdb2_resource.yaml @@ -0,0 +1,45 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + 
key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/usecase01/extfile.txt b/docs/multitenant/ords-based/usecase01/extfile.txt similarity index 100% rename from docs/multitenant/usecase01/extfile.txt rename to docs/multitenant/ords-based/usecase01/extfile.txt diff --git a/docs/multitenant/ords-based/usecase01/logfiles/BuildImage.log b/docs/multitenant/ords-based/usecase01/logfiles/BuildImage.log new file mode 100644 index 00000000..f35c66d8 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/BuildImage.log @@ -0,0 +1,896 @@ +/usr/bin/docker build -t oracle/ords-dboper:latest ../../../ords +Sending build context to Docker daemon 13.82kB +Step 1/12 : FROM container-registry.oracle.com/java/jdk:latest + ---> b8457e2f0b73 +Step 2/12 : ENV ORDS_HOME=/opt/oracle/ords/ RUN_FILE="runOrdsSSL.sh" ORDSVERSION=23.4.0-8 + ---> Using cache + ---> 3317a16cd6f8 +Step 3/12 : COPY $RUN_FILE $ORDS_HOME + ---> 7995edec33cc +Step 4/12 : RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps curl lsof && yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && yum -y install java-11-openjdk-devel && yum -y install iproute && yum clean all + ---> Running in fe168b01f3ad +Oracle Linux 8 BaseOS Latest (x86_64) 91 MB/s | 79 MB 00:00 +Oracle Linux 8 Application Stream (x86_64) 69 MB/s | 62 MB 00:00 +Last metadata expiration check: 0:00:12 ago on Tue 20 Aug 2024 08:54:50 AM UTC. +Package yum-utils-4.0.21-23.0.1.el8.noarch is already installed. 
+Package tar-2:1.30-9.el8.x86_64 is already installed. +Package vim-minimal-2:8.0.1763-19.0.1.el8_6.4.x86_64 is already installed. +Package procps-ng-3.3.15-14.0.1.el8.x86_64 is already installed. +Package curl-7.61.1-33.el8_9.5.x86_64 is already installed. +Dependencies resolved. +================================================================================ + Package Arch Version Repository Size +================================================================================ +Installing: + bind-utils x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 453 k + expect x86_64 5.45.4-5.el8 ol8_baseos_latest 266 k + hostname x86_64 3.20-6.el8 ol8_baseos_latest 32 k + lsof x86_64 4.93.2-1.el8 ol8_baseos_latest 253 k + net-tools x86_64 2.0-0.52.20160912git.el8 ol8_baseos_latest 322 k + openssl x86_64 1:1.1.1k-12.el8_9 ol8_baseos_latest 710 k + sudo x86_64 1.9.5p2-1.el8_9 ol8_baseos_latest 1.0 M + tree x86_64 1.7.0-15.el8 ol8_baseos_latest 59 k + unzip x86_64 6.0-46.0.1.el8 ol8_baseos_latest 196 k + wget x86_64 1.19.5-12.0.1.el8_10 ol8_appstream 733 k + which x86_64 2.21-20.el8 ol8_baseos_latest 50 k + zip x86_64 3.0-23.el8 ol8_baseos_latest 270 k +Upgrading: + curl x86_64 7.61.1-34.el8 ol8_baseos_latest 352 k + dnf-plugins-core noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 76 k + libcurl x86_64 7.61.1-34.el8 ol8_baseos_latest 303 k + python3-dnf-plugins-core + noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 263 k + yum-utils noarch 4.0.21-25.0.1.el8 ol8_baseos_latest 75 k +Installing dependencies: + bind-libs x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 176 k + bind-libs-lite x86_64 32:9.11.36-16.el8_10.2 ol8_appstream 1.2 M + bind-license noarch 32:9.11.36-16.el8_10.2 ol8_appstream 104 k + fstrm x86_64 0.6.1-3.el8 ol8_appstream 29 k + libmaxminddb x86_64 1.2.0-10.el8_9.1 ol8_appstream 32 k + libmetalink x86_64 0.1.3-7.el8 ol8_baseos_latest 32 k + protobuf-c x86_64 1.3.0-8.el8 ol8_appstream 37 k + python3-bind noarch 32:9.11.36-16.el8_10.2 ol8_appstream 151 k + python3-ply noarch 
3.9-9.el8 ol8_baseos_latest 111 k + tcl x86_64 1:8.6.8-2.el8 ol8_baseos_latest 1.1 M +Installing weak dependencies: + geolite2-city noarch 20180605-1.el8 ol8_appstream 19 M + geolite2-country noarch 20180605-1.el8 ol8_appstream 1.0 M + +Transaction Summary +================================================================================ +Install 24 Packages +Upgrade 5 Packages + +Total download size: 28 M +Downloading Packages: +(1/29): hostname-3.20-6.el8.x86_64.rpm 268 kB/s | 32 kB 00:00 +(2/29): libmetalink-0.1.3-7.el8.x86_64.rpm 257 kB/s | 32 kB 00:00 +(3/29): expect-5.45.4-5.el8.x86_64.rpm 1.4 MB/s | 266 kB 00:00 +(4/29): lsof-4.93.2-1.el8.x86_64.rpm 3.2 MB/s | 253 kB 00:00 +(5/29): net-tools-2.0-0.52.20160912git.el8.x86_ 3.6 MB/s | 322 kB 00:00 +(6/29): python3-ply-3.9-9.el8.noarch.rpm 2.7 MB/s | 111 kB 00:00 +(7/29): openssl-1.1.1k-12.el8_9.x86_64.rpm 10 MB/s | 710 kB 00:00 +(8/29): tree-1.7.0-15.el8.x86_64.rpm 2.2 MB/s | 59 kB 00:00 +(9/29): sudo-1.9.5p2-1.el8_9.x86_64.rpm 14 MB/s | 1.0 MB 00:00 +(10/29): unzip-6.0-46.0.1.el8.x86_64.rpm 6.8 MB/s | 196 kB 00:00 +(11/29): which-2.21-20.el8.x86_64.rpm 2.0 MB/s | 50 kB 00:00 +(12/29): tcl-8.6.8-2.el8.x86_64.rpm 13 MB/s | 1.1 MB 00:00 +(13/29): bind-libs-9.11.36-16.el8_10.2.x86_64.r 6.7 MB/s | 176 kB 00:00 +(14/29): zip-3.0-23.el8.x86_64.rpm 8.4 MB/s | 270 kB 00:00 +(15/29): bind-libs-lite-9.11.36-16.el8_10.2.x86 29 MB/s | 1.2 MB 00:00 +(16/29): bind-license-9.11.36-16.el8_10.2.noarc 3.3 MB/s | 104 kB 00:00 +(17/29): bind-utils-9.11.36-16.el8_10.2.x86_64. 
13 MB/s | 453 kB 00:00 +(18/29): fstrm-0.6.1-3.el8.x86_64.rpm 1.2 MB/s | 29 kB 00:00 +(19/29): libmaxminddb-1.2.0-10.el8_9.1.x86_64.r 1.3 MB/s | 32 kB 00:00 +(20/29): geolite2-country-20180605-1.el8.noarch 17 MB/s | 1.0 MB 00:00 +(21/29): protobuf-c-1.3.0-8.el8.x86_64.rpm 1.5 MB/s | 37 kB 00:00 +(22/29): python3-bind-9.11.36-16.el8_10.2.noarc 5.8 MB/s | 151 kB 00:00 +(23/29): wget-1.19.5-12.0.1.el8_10.x86_64.rpm 17 MB/s | 733 kB 00:00 +(24/29): curl-7.61.1-34.el8.x86_64.rpm 12 MB/s | 352 kB 00:00 +(25/29): dnf-plugins-core-4.0.21-25.0.1.el8.noa 2.4 MB/s | 76 kB 00:00 +(26/29): libcurl-7.61.1-34.el8.x86_64.rpm 8.6 MB/s | 303 kB 00:00 +(27/29): python3-dnf-plugins-core-4.0.21-25.0.1 9.8 MB/s | 263 kB 00:00 +(28/29): yum-utils-4.0.21-25.0.1.el8.noarch.rpm 3.0 MB/s | 75 kB 00:00 +(29/29): geolite2-city-20180605-1.el8.noarch.rp 66 MB/s | 19 MB 00:00 +-------------------------------------------------------------------------------- +Total 43 MB/s | 28 MB 00:00 +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. 
+Running transaction + Preparing : 1/1 + Running scriptlet: protobuf-c-1.3.0-8.el8.x86_64 1/1 + Installing : protobuf-c-1.3.0-8.el8.x86_64 1/34 + Installing : fstrm-0.6.1-3.el8.x86_64 2/34 + Installing : bind-license-32:9.11.36-16.el8_10.2.noarch 3/34 + Upgrading : python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch 4/34 + Upgrading : dnf-plugins-core-4.0.21-25.0.1.el8.noarch 5/34 + Upgrading : libcurl-7.61.1-34.el8.x86_64 6/34 + Installing : geolite2-country-20180605-1.el8.noarch 7/34 + Installing : geolite2-city-20180605-1.el8.noarch 8/34 + Installing : libmaxminddb-1.2.0-10.el8_9.1.x86_64 9/34 + Running scriptlet: libmaxminddb-1.2.0-10.el8_9.1.x86_64 9/34 + Installing : bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 10/34 + Installing : bind-libs-32:9.11.36-16.el8_10.2.x86_64 11/34 + Installing : unzip-6.0-46.0.1.el8.x86_64 12/34 + Installing : tcl-1:8.6.8-2.el8.x86_64 13/34 + Running scriptlet: tcl-1:8.6.8-2.el8.x86_64 13/34 + Installing : python3-ply-3.9-9.el8.noarch 14/34 + Installing : python3-bind-32:9.11.36-16.el8_10.2.noarch 15/34 + Installing : libmetalink-0.1.3-7.el8.x86_64 16/34 + Installing : wget-1.19.5-12.0.1.el8_10.x86_64 17/34 + Running scriptlet: wget-1.19.5-12.0.1.el8_10.x86_64 17/34 + Installing : bind-utils-32:9.11.36-16.el8_10.2.x86_64 18/34 + Installing : expect-5.45.4-5.el8.x86_64 19/34 + Installing : zip-3.0-23.el8.x86_64 20/34 + Upgrading : curl-7.61.1-34.el8.x86_64 21/34 + Upgrading : yum-utils-4.0.21-25.0.1.el8.noarch 22/34 + Installing : which-2.21-20.el8.x86_64 23/34 + Installing : tree-1.7.0-15.el8.x86_64 24/34 + Installing : sudo-1.9.5p2-1.el8_9.x86_64 25/34 + Running scriptlet: sudo-1.9.5p2-1.el8_9.x86_64 25/34 + Installing : openssl-1:1.1.1k-12.el8_9.x86_64 26/34 + Installing : net-tools-2.0-0.52.20160912git.el8.x86_64 27/34 + Running scriptlet: net-tools-2.0-0.52.20160912git.el8.x86_64 27/34 + Installing : lsof-4.93.2-1.el8.x86_64 28/34 + Installing : hostname-3.20-6.el8.x86_64 29/34 + Running scriptlet: hostname-3.20-6.el8.x86_64 
29/34 + Cleanup : curl-7.61.1-33.el8_9.5.x86_64 30/34 + Cleanup : yum-utils-4.0.21-23.0.1.el8.noarch 31/34 + Cleanup : dnf-plugins-core-4.0.21-23.0.1.el8.noarch 32/34 + Cleanup : python3-dnf-plugins-core-4.0.21-23.0.1.el8.noarch 33/34 + Cleanup : libcurl-7.61.1-33.el8_9.5.x86_64 34/34 + Running scriptlet: libcurl-7.61.1-33.el8_9.5.x86_64 34/34 + Verifying : expect-5.45.4-5.el8.x86_64 1/34 + Verifying : hostname-3.20-6.el8.x86_64 2/34 + Verifying : libmetalink-0.1.3-7.el8.x86_64 3/34 + Verifying : lsof-4.93.2-1.el8.x86_64 4/34 + Verifying : net-tools-2.0-0.52.20160912git.el8.x86_64 5/34 + Verifying : openssl-1:1.1.1k-12.el8_9.x86_64 6/34 + Verifying : python3-ply-3.9-9.el8.noarch 7/34 + Verifying : sudo-1.9.5p2-1.el8_9.x86_64 8/34 + Verifying : tcl-1:8.6.8-2.el8.x86_64 9/34 + Verifying : tree-1.7.0-15.el8.x86_64 10/34 + Verifying : unzip-6.0-46.0.1.el8.x86_64 11/34 + Verifying : which-2.21-20.el8.x86_64 12/34 + Verifying : zip-3.0-23.el8.x86_64 13/34 + Verifying : bind-libs-32:9.11.36-16.el8_10.2.x86_64 14/34 + Verifying : bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 15/34 + Verifying : bind-license-32:9.11.36-16.el8_10.2.noarch 16/34 + Verifying : bind-utils-32:9.11.36-16.el8_10.2.x86_64 17/34 + Verifying : fstrm-0.6.1-3.el8.x86_64 18/34 + Verifying : geolite2-city-20180605-1.el8.noarch 19/34 + Verifying : geolite2-country-20180605-1.el8.noarch 20/34 + Verifying : libmaxminddb-1.2.0-10.el8_9.1.x86_64 21/34 + Verifying : protobuf-c-1.3.0-8.el8.x86_64 22/34 + Verifying : python3-bind-32:9.11.36-16.el8_10.2.noarch 23/34 + Verifying : wget-1.19.5-12.0.1.el8_10.x86_64 24/34 + Verifying : curl-7.61.1-34.el8.x86_64 25/34 + Verifying : curl-7.61.1-33.el8_9.5.x86_64 26/34 + Verifying : dnf-plugins-core-4.0.21-25.0.1.el8.noarch 27/34 + Verifying : dnf-plugins-core-4.0.21-23.0.1.el8.noarch 28/34 + Verifying : libcurl-7.61.1-34.el8.x86_64 29/34 + Verifying : libcurl-7.61.1-33.el8_9.5.x86_64 30/34 + Verifying : python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch 31/34 + Verifying 
: python3-dnf-plugins-core-4.0.21-23.0.1.el8.noarch 32/34 + Verifying : yum-utils-4.0.21-25.0.1.el8.noarch 33/34 + Verifying : yum-utils-4.0.21-23.0.1.el8.noarch 34/34 + +Upgraded: + curl-7.61.1-34.el8.x86_64 + dnf-plugins-core-4.0.21-25.0.1.el8.noarch + libcurl-7.61.1-34.el8.x86_64 + python3-dnf-plugins-core-4.0.21-25.0.1.el8.noarch + yum-utils-4.0.21-25.0.1.el8.noarch +Installed: + bind-libs-32:9.11.36-16.el8_10.2.x86_64 + bind-libs-lite-32:9.11.36-16.el8_10.2.x86_64 + bind-license-32:9.11.36-16.el8_10.2.noarch + bind-utils-32:9.11.36-16.el8_10.2.x86_64 + expect-5.45.4-5.el8.x86_64 + fstrm-0.6.1-3.el8.x86_64 + geolite2-city-20180605-1.el8.noarch + geolite2-country-20180605-1.el8.noarch + hostname-3.20-6.el8.x86_64 + libmaxminddb-1.2.0-10.el8_9.1.x86_64 + libmetalink-0.1.3-7.el8.x86_64 + lsof-4.93.2-1.el8.x86_64 + net-tools-2.0-0.52.20160912git.el8.x86_64 + openssl-1:1.1.1k-12.el8_9.x86_64 + protobuf-c-1.3.0-8.el8.x86_64 + python3-bind-32:9.11.36-16.el8_10.2.noarch + python3-ply-3.9-9.el8.noarch + sudo-1.9.5p2-1.el8_9.x86_64 + tcl-1:8.6.8-2.el8.x86_64 + tree-1.7.0-15.el8.x86_64 + unzip-6.0-46.0.1.el8.x86_64 + wget-1.19.5-12.0.1.el8_10.x86_64 + which-2.21-20.el8.x86_64 + zip-3.0-23.el8.x86_64 + +Complete! +Adding repo from: http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 +created by dnf config-manager from http://yum.o 496 kB/s | 139 kB 00:00 +Last metadata expiration check: 0:00:01 ago on Tue 20 Aug 2024 08:55:14 AM UTC. +Dependencies resolved. 
+============================================================================================== + Package Arch Version Repository Size +============================================================================================== +Installing: + java-11-openjdk-devel x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 3.4 M +Installing dependencies: + adwaita-cursor-theme noarch 3.28.0-3.el8 ol8_appstream 647 k + adwaita-icon-theme noarch 3.28.0-3.el8 ol8_appstream 11 M + alsa-lib x86_64 1.2.10-2.el8 ol8_appstream 500 k + at-spi2-atk x86_64 2.26.2-1.el8 ol8_appstream 89 k + at-spi2-core x86_64 2.28.0-1.el8 ol8_appstream 169 k + atk x86_64 2.28.1-1.el8 ol8_appstream 272 k + avahi-libs x86_64 0.7-27.el8 ol8_baseos_latest 61 k + cairo x86_64 1.15.12-6.el8 ol8_appstream 719 k + cairo-gobject x86_64 1.15.12-6.el8 ol8_appstream 33 k + colord-libs x86_64 1.4.2-1.el8 ol8_appstream 236 k + copy-jdk-configs noarch 4.0-2.el8 ol8_appstream 30 k + cpio x86_64 2.12-11.el8 ol8_baseos_latest 266 k + crypto-policies-scripts noarch 20230731-1.git3177e06.el8 ol8_baseos_latest 84 k + cups-libs x86_64 1:2.2.6-60.el8_10 ol8_baseos_latest 435 k + dracut x86_64 049-233.git20240115.0.1.el8 ol8_baseos_latest 382 k + file x86_64 5.33-25.el8 ol8_baseos_latest 77 k + fribidi x86_64 1.0.4-9.el8 ol8_appstream 89 k + gdk-pixbuf2 x86_64 2.36.12-6.el8_10 ol8_baseos_latest 465 k + gdk-pixbuf2-modules x86_64 2.36.12-6.el8_10 ol8_appstream 108 k + gettext x86_64 0.19.8.1-17.el8 ol8_baseos_latest 1.1 M + gettext-libs x86_64 0.19.8.1-17.el8 ol8_baseos_latest 312 k + glib-networking x86_64 2.56.1-1.1.el8 ol8_baseos_latest 155 k + graphite2 x86_64 1.3.10-10.el8 ol8_appstream 122 k + grub2-common noarch 1:2.02-156.0.2.el8 ol8_baseos_latest 897 k + grub2-tools x86_64 1:2.02-156.0.2.el8 ol8_baseos_latest 2.0 M + grub2-tools-minimal x86_64 1:2.02-156.0.2.el8 ol8_baseos_latest 215 k + gsettings-desktop-schemas x86_64 3.32.0-6.el8 ol8_baseos_latest 633 k + gtk-update-icon-cache x86_64 3.22.30-11.el8 ol8_appstream 32 k 
+ harfbuzz x86_64 1.7.5-4.el8 ol8_appstream 295 k + hicolor-icon-theme noarch 0.17-2.el8 ol8_appstream 48 k + jasper-libs x86_64 2.0.14-5.el8 ol8_appstream 167 k + java-11-openjdk x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 475 k + java-11-openjdk-headless x86_64 1:11.0.24.0.8-3.0.1.el8 ol8_appstream 42 M + javapackages-filesystem noarch 5.3.0-1.module+el8+5136+7ff78f74 ol8_appstream 30 k + jbigkit-libs x86_64 2.1-14.el8 ol8_appstream 55 k + json-glib x86_64 1.4.4-1.el8 ol8_baseos_latest 144 k + kbd-legacy noarch 2.0.4-11.el8 ol8_baseos_latest 481 k + kbd-misc noarch 2.0.4-11.el8 ol8_baseos_latest 1.5 M + lcms2 x86_64 2.9-2.el8 ol8_appstream 164 k + libX11 x86_64 1.6.8-8.el8 ol8_appstream 611 k + libX11-common noarch 1.6.8-8.el8 ol8_appstream 157 k + libXau x86_64 1.0.9-3.el8 ol8_appstream 37 k + libXcomposite x86_64 0.4.4-14.el8 ol8_appstream 28 k + libXcursor x86_64 1.1.15-3.el8 ol8_appstream 36 k + libXdamage x86_64 1.1.4-14.el8 ol8_appstream 27 k + libXext x86_64 1.3.4-1.el8 ol8_appstream 45 k + libXfixes x86_64 5.0.3-7.el8 ol8_appstream 25 k + libXft x86_64 2.3.3-1.el8 ol8_appstream 67 k + libXi x86_64 1.7.10-1.el8 ol8_appstream 49 k + libXinerama x86_64 1.1.4-1.el8 ol8_appstream 15 k + libXrandr x86_64 1.5.2-1.el8 ol8_appstream 34 k + libXrender x86_64 0.9.10-7.el8 ol8_appstream 33 k + libXtst x86_64 1.2.3-7.el8 ol8_appstream 22 k + libcroco x86_64 0.6.12-4.el8_2.1 ol8_baseos_latest 113 k + libdatrie x86_64 0.2.9-7.el8 ol8_appstream 33 k + libepoxy x86_64 1.5.8-1.el8 ol8_appstream 225 k + libfontenc x86_64 1.1.3-8.el8 ol8_appstream 37 k + libgomp x86_64 8.5.0-22.0.1.el8_10 ol8_baseos_latest 218 k + libgusb x86_64 0.3.0-1.el8 ol8_baseos_latest 49 k + libjpeg-turbo x86_64 1.5.3-12.el8 ol8_appstream 157 k + libkcapi x86_64 1.4.0-2.0.1.el8 ol8_baseos_latest 52 k + libkcapi-hmaccalc x86_64 1.4.0-2.0.1.el8 ol8_baseos_latest 31 k + libmodman x86_64 2.0.1-17.el8 ol8_baseos_latest 36 k + libpkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 35 k + libproxy x86_64 
0.4.15-5.2.el8 ol8_baseos_latest 75 k + libsoup x86_64 2.62.3-5.el8 ol8_baseos_latest 424 k + libthai x86_64 0.1.27-2.el8 ol8_appstream 203 k + libtiff x86_64 4.0.9-32.el8_10 ol8_appstream 189 k + libwayland-client x86_64 1.21.0-1.el8 ol8_appstream 41 k + libwayland-cursor x86_64 1.21.0-1.el8 ol8_appstream 26 k + libwayland-egl x86_64 1.21.0-1.el8 ol8_appstream 19 k + libxcb x86_64 1.13.1-1.el8 ol8_appstream 231 k + libxkbcommon x86_64 0.9.1-1.el8 ol8_appstream 116 k + lksctp-tools x86_64 1.0.18-3.el8 ol8_baseos_latest 100 k + lua x86_64 5.3.4-12.el8 ol8_appstream 192 k + nspr x86_64 4.35.0-1.el8_8 ol8_appstream 143 k + nss x86_64 3.90.0-7.el8_10 ol8_appstream 750 k + nss-softokn x86_64 3.90.0-7.el8_10 ol8_appstream 1.2 M + nss-softokn-freebl x86_64 3.90.0-7.el8_10 ol8_appstream 375 k + nss-sysinit x86_64 3.90.0-7.el8_10 ol8_appstream 74 k + nss-util x86_64 3.90.0-7.el8_10 ol8_appstream 139 k + os-prober x86_64 1.74-9.0.1.el8 ol8_baseos_latest 51 k + pango x86_64 1.42.4-8.el8 ol8_appstream 297 k + pixman x86_64 0.38.4-4.el8 ol8_appstream 256 k + pkgconf x86_64 1.4.2-1.el8 ol8_baseos_latest 38 k + pkgconf-m4 noarch 1.4.2-1.el8 ol8_baseos_latest 17 k + pkgconf-pkg-config x86_64 1.4.2-1.el8 ol8_baseos_latest 15 k + rest x86_64 0.8.1-2.el8 ol8_appstream 70 k + shared-mime-info x86_64 1.9-4.el8 ol8_baseos_latest 328 k + systemd-udev x86_64 239-78.0.4.el8 ol8_baseos_latest 1.6 M + ttmkfdir x86_64 3.0.9-54.el8 ol8_appstream 62 k + tzdata-java noarch 2024a-1.0.1.el8 ol8_appstream 186 k + xkeyboard-config noarch 2.28-1.el8 ol8_appstream 782 k + xorg-x11-font-utils x86_64 1:7.5-41.el8 ol8_appstream 104 k + xorg-x11-fonts-Type1 noarch 7.5-19.el8 ol8_appstream 522 k + xz x86_64 5.2.4-4.el8_6 ol8_baseos_latest 153 k +Installing weak dependencies: + abattis-cantarell-fonts noarch 0.0.25-6.el8 ol8_appstream 155 k + dconf x86_64 0.28.0-4.0.1.el8 ol8_appstream 108 k + dejavu-sans-mono-fonts noarch 2.35-7.el8 ol8_baseos_latest 447 k + grubby x86_64 8.40-49.0.2.el8 ol8_baseos_latest 
50 k + gtk3 x86_64 3.22.30-11.el8 ol8_appstream 4.5 M + hardlink x86_64 1:1.3-6.el8 ol8_baseos_latest 29 k + kbd x86_64 2.0.4-11.el8 ol8_baseos_latest 390 k + memstrack x86_64 0.2.5-2.el8 ol8_baseos_latest 51 k + pigz x86_64 2.4-4.el8 ol8_baseos_latest 80 k +Enabling module streams: + javapackages-runtime 201801 + +Transaction Summary +============================================================================================== +Install 106 Packages + +Total download size: 86 M +Installed size: 312 M +Downloading Packages: +(1/106): crypto-policies-scripts-20230731-1.git 862 kB/s | 84 kB 00:00 +(2/106): avahi-libs-0.7-27.el8.x86_64.rpm 602 kB/s | 61 kB 00:00 +(3/106): cpio-2.12-11.el8.x86_64.rpm 1.8 MB/s | 266 kB 00:00 +(4/106): cups-libs-2.2.6-60.el8_10.x86_64.rpm 5.7 MB/s | 435 kB 00:00 +(5/106): dejavu-sans-mono-fonts-2.35-7.el8.noar 5.1 MB/s | 447 kB 00:00 +(6/106): dracut-049-233.git20240115.0.1.el8.x86 7.0 MB/s | 382 kB 00:00 +(7/106): gdk-pixbuf2-2.36.12-6.el8_10.x86_64.rp 12 MB/s | 465 kB 00:00 +(8/106): gettext-libs-0.19.8.1-17.el8.x86_64.rp 9.3 MB/s | 312 kB 00:00 +(9/106): gettext-0.19.8.1-17.el8.x86_64.rpm 16 MB/s | 1.1 MB 00:00 +(10/106): glib-networking-2.56.1-1.1.el8.x86_64 6.0 MB/s | 155 kB 00:00 +(11/106): grub2-common-2.02-156.0.2.el8.noarch. 26 MB/s | 897 kB 00:00 +(12/106): grub2-tools-minimal-2.02-156.0.2.el8. 
8.2 MB/s | 215 kB 00:00 +(13/106): grubby-8.40-49.0.2.el8.x86_64.rpm 2.1 MB/s | 50 kB 00:00 +(14/106): grub2-tools-2.02-156.0.2.el8.x86_64.r 26 MB/s | 2.0 MB 00:00 +(15/106): gsettings-desktop-schemas-3.32.0-6.el 19 MB/s | 633 kB 00:00 +(16/106): hardlink-1.3-6.el8.x86_64.rpm 1.1 MB/s | 29 kB 00:00 +(17/106): json-glib-1.4.4-1.el8.x86_64.rpm 5.9 MB/s | 144 kB 00:00 +(18/106): kbd-2.0.4-11.el8.x86_64.rpm 14 MB/s | 390 kB 00:00 +(19/106): kbd-legacy-2.0.4-11.el8.noarch.rpm 17 MB/s | 481 kB 00:00 +(20/106): kbd-misc-2.0.4-11.el8.noarch.rpm 41 MB/s | 1.5 MB 00:00 +(21/106): libcroco-0.6.12-4.el8_2.1.x86_64.rpm 4.7 MB/s | 113 kB 00:00 +(22/106): libgomp-8.5.0-22.0.1.el8_10.x86_64.rp 9.1 MB/s | 218 kB 00:00 +(23/106): libgusb-0.3.0-1.el8.x86_64.rpm 2.1 MB/s | 49 kB 00:00 +(24/106): libkcapi-1.4.0-2.0.1.el8.x86_64.rpm 1.6 MB/s | 52 kB 00:00 +(25/106): libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86 822 kB/s | 31 kB 00:00 +(26/106): libmodman-2.0.1-17.el8.x86_64.rpm 1.6 MB/s | 36 kB 00:00 +(27/106): libpkgconf-1.4.2-1.el8.x86_64.rpm 1.2 MB/s | 35 kB 00:00 +(28/106): libproxy-0.4.15-5.2.el8.x86_64.rpm 3.0 MB/s | 75 kB 00:00 +(29/106): libsoup-2.62.3-5.el8.x86_64.rpm 15 MB/s | 424 kB 00:00 +(30/106): lksctp-tools-1.0.18-3.el8.x86_64.rpm 3.5 MB/s | 100 kB 00:00 +(31/106): memstrack-0.2.5-2.el8.x86_64.rpm 2.2 MB/s | 51 kB 00:00 +(32/106): os-prober-1.74-9.0.1.el8.x86_64.rpm 2.2 MB/s | 51 kB 00:00 +(33/106): pigz-2.4-4.el8.x86_64.rpm 3.5 MB/s | 80 kB 00:00 +(34/106): pkgconf-1.4.2-1.el8.x86_64.rpm 1.7 MB/s | 38 kB 00:00 +(35/106): pkgconf-m4-1.4.2-1.el8.noarch.rpm 761 kB/s | 17 kB 00:00 +(36/106): pkgconf-pkg-config-1.4.2-1.el8.x86_64 691 kB/s | 15 kB 00:00 +(37/106): shared-mime-info-1.9-4.el8.x86_64.rpm 13 MB/s | 328 kB 00:00 +(38/106): systemd-udev-239-78.0.4.el8.x86_64.rp 32 MB/s | 1.6 MB 00:00 +(39/106): xz-5.2.4-4.el8_6.x86_64.rpm 5.2 MB/s | 153 kB 00:00 +(40/106): abattis-cantarell-fonts-0.0.25-6.el8. 
6.4 MB/s | 155 kB 00:00 +(41/106): adwaita-cursor-theme-3.28.0-3.el8.noa 22 MB/s | 647 kB 00:00 +(42/106): alsa-lib-1.2.10-2.el8.x86_64.rpm 18 MB/s | 500 kB 00:00 +(43/106): at-spi2-atk-2.26.2-1.el8.x86_64.rpm 3.8 MB/s | 89 kB 00:00 +(44/106): at-spi2-core-2.28.0-1.el8.x86_64.rpm 6.9 MB/s | 169 kB 00:00 +(45/106): atk-2.28.1-1.el8.x86_64.rpm 9.2 MB/s | 272 kB 00:00 +(46/106): cairo-1.15.12-6.el8.x86_64.rpm 24 MB/s | 719 kB 00:00 +(47/106): adwaita-icon-theme-3.28.0-3.el8.noarc 65 MB/s | 11 MB 00:00 +(48/106): cairo-gobject-1.15.12-6.el8.x86_64.rp 914 kB/s | 33 kB 00:00 +(49/106): colord-libs-1.4.2-1.el8.x86_64.rpm 9.5 MB/s | 236 kB 00:00 +(50/106): copy-jdk-configs-4.0-2.el8.noarch.rpm 1.1 MB/s | 30 kB 00:00 +(51/106): dconf-0.28.0-4.0.1.el8.x86_64.rpm 4.4 MB/s | 108 kB 00:00 +(52/106): fribidi-1.0.4-9.el8.x86_64.rpm 3.9 MB/s | 89 kB 00:00 +(53/106): graphite2-1.3.10-10.el8.x86_64.rpm 5.1 MB/s | 122 kB 00:00 +(54/106): gdk-pixbuf2-modules-2.36.12-6.el8_10. 3.6 MB/s | 108 kB 00:00 +(55/106): gtk-update-icon-cache-3.22.30-11.el8. 1.4 MB/s | 32 kB 00:00 +(56/106): harfbuzz-1.7.5-4.el8.x86_64.rpm 11 MB/s | 295 kB 00:00 +(57/106): gtk3-3.22.30-11.el8.x86_64.rpm 68 MB/s | 4.5 MB 00:00 +(58/106): hicolor-icon-theme-0.17-2.el8.noarch. 
2.1 MB/s | 48 kB 00:00 +(59/106): java-11-openjdk-11.0.24.0.8-3.0.1.el8 17 MB/s | 475 kB 00:00 +(60/106): jasper-libs-2.0.14-5.el8.x86_64.rpm 5.0 MB/s | 167 kB 00:00 +(61/106): java-11-openjdk-devel-11.0.24.0.8-3.0 61 MB/s | 3.4 MB 00:00 +(62/106): javapackages-filesystem-5.3.0-1.modul 1.2 MB/s | 30 kB 00:00 +(63/106): jbigkit-libs-2.1-14.el8.x86_64.rpm 2.1 MB/s | 55 kB 00:00 +(64/106): lcms2-2.9-2.el8.x86_64.rpm 3.8 MB/s | 164 kB 00:00 +(65/106): libX11-1.6.8-8.el8.x86_64.rpm 20 MB/s | 611 kB 00:00 +(66/106): libX11-common-1.6.8-8.el8.noarch.rpm 6.8 MB/s | 157 kB 00:00 +(67/106): libXau-1.0.9-3.el8.x86_64.rpm 1.6 MB/s | 37 kB 00:00 +(68/106): libXcomposite-0.4.4-14.el8.x86_64.rpm 1.3 MB/s | 28 kB 00:00 +(69/106): libXcursor-1.1.15-3.el8.x86_64.rpm 1.6 MB/s | 36 kB 00:00 +(70/106): libXdamage-1.1.4-14.el8.x86_64.rpm 1.2 MB/s | 27 kB 00:00 +(71/106): libXext-1.3.4-1.el8.x86_64.rpm 2.0 MB/s | 45 kB 00:00 +(72/106): libXfixes-5.0.3-7.el8.x86_64.rpm 1.1 MB/s | 25 kB 00:00 +(73/106): libXft-2.3.3-1.el8.x86_64.rpm 2.9 MB/s | 67 kB 00:00 +(74/106): libXi-1.7.10-1.el8.x86_64.rpm 2.2 MB/s | 49 kB 00:00 +(75/106): libXinerama-1.1.4-1.el8.x86_64.rpm 717 kB/s | 15 kB 00:00 +(76/106): libXrandr-1.5.2-1.el8.x86_64.rpm 1.5 MB/s | 34 kB 00:00 +(77/106): libXrender-0.9.10-7.el8.x86_64.rpm 1.4 MB/s | 33 kB 00:00 +(78/106): libXtst-1.2.3-7.el8.x86_64.rpm 957 kB/s | 22 kB 00:00 +(79/106): java-11-openjdk-headless-11.0.24.0.8- 71 MB/s | 42 MB 00:00 +(80/106): libdatrie-0.2.9-7.el8.x86_64.rpm 274 kB/s | 33 kB 00:00 +(81/106): libepoxy-1.5.8-1.el8.x86_64.rpm 9.1 MB/s | 225 kB 00:00 +(82/106): libfontenc-1.1.3-8.el8.x86_64.rpm 1.5 MB/s | 37 kB 00:00 +(83/106): libthai-0.1.27-2.el8.x86_64.rpm 8.2 MB/s | 203 kB 00:00 +(84/106): libjpeg-turbo-1.5.3-12.el8.x86_64.rpm 5.1 MB/s | 157 kB 00:00 +(85/106): libtiff-4.0.9-32.el8_10.x86_64.rpm 7.8 MB/s | 189 kB 00:00 +(86/106): libwayland-client-1.21.0-1.el8.x86_64 1.7 MB/s | 41 kB 00:00 +(87/106): libwayland-cursor-1.21.0-1.el8.x86_64 1.2 MB/s | 26 
kB 00:00 +(88/106): libwayland-egl-1.21.0-1.el8.x86_64.rp 801 kB/s | 19 kB 00:00 +(89/106): libxcb-1.13.1-1.el8.x86_64.rpm 9.7 MB/s | 231 kB 00:00 +(90/106): libxkbcommon-0.9.1-1.el8.x86_64.rpm 5.0 MB/s | 116 kB 00:00 +(91/106): nspr-4.35.0-1.el8_8.x86_64.rpm 6.0 MB/s | 143 kB 00:00 +(92/106): lua-5.3.4-12.el8.x86_64.rpm 5.9 MB/s | 192 kB 00:00 +(93/106): nss-softokn-3.90.0-7.el8_10.x86_64.rp 38 MB/s | 1.2 MB 00:00 +(94/106): nss-3.90.0-7.el8_10.x86_64.rpm 17 MB/s | 750 kB 00:00 +(95/106): nss-softokn-freebl-3.90.0-7.el8_10.x8 14 MB/s | 375 kB 00:00 +(96/106): nss-sysinit-3.90.0-7.el8_10.x86_64.rp 3.2 MB/s | 74 kB 00:00 +(97/106): nss-util-3.90.0-7.el8_10.x86_64.rpm 5.8 MB/s | 139 kB 00:00 +(98/106): pango-1.42.4-8.el8.x86_64.rpm 11 MB/s | 297 kB 00:00 +(99/106): pixman-0.38.4-4.el8.x86_64.rpm 10 MB/s | 256 kB 00:00 +(100/106): rest-0.8.1-2.el8.x86_64.rpm 3.1 MB/s | 70 kB 00:00 +(101/106): ttmkfdir-3.0.9-54.el8.x86_64.rpm 2.5 MB/s | 62 kB 00:00 +(102/106): tzdata-java-2024a-1.0.1.el8.noarch.r 7.4 MB/s | 186 kB 00:00 +(103/106): xkeyboard-config-2.28-1.el8.noarch.r 27 MB/s | 782 kB 00:00 +(104/106): xorg-x11-font-utils-7.5-41.el8.x86_6 3.9 MB/s | 104 kB 00:00 +(105/106): xorg-x11-fonts-Type1-7.5-19.el8.noar 1.3 MB/s | 522 kB 00:00 +(106/106): file-5.33-25.el8.x86_64.rpm 26 kB/s | 77 kB 00:02 +-------------------------------------------------------------------------------- +Total 27 MB/s | 86 MB 00:03 +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. 
+Running transaction + Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 1/1 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8.x86 1/1 + Preparing : 1/1 + Installing : nspr-4.35.0-1.el8_8.x86_64 1/106 + Running scriptlet: nspr-4.35.0-1.el8_8.x86_64 1/106 + Installing : nss-util-3.90.0-7.el8_10.x86_64 2/106 + Installing : libjpeg-turbo-1.5.3-12.el8.x86_64 3/106 + Installing : pixman-0.38.4-4.el8.x86_64 4/106 + Installing : libwayland-client-1.21.0-1.el8.x86_64 5/106 + Installing : atk-2.28.1-1.el8.x86_64 6/106 + Installing : libgomp-8.5.0-22.0.1.el8_10.x86_64 7/106 + Running scriptlet: libgomp-8.5.0-22.0.1.el8_10.x86_64 7/106 + Installing : libcroco-0.6.12-4.el8_2.1.x86_64 8/106 + Running scriptlet: libcroco-0.6.12-4.el8_2.1.x86_64 8/106 + Installing : grub2-common-1:2.02-156.0.2.el8.noarch 9/106 + Installing : gettext-libs-0.19.8.1-17.el8.x86_64 10/106 + Installing : gettext-0.19.8.1-17.el8.x86_64 11/106 + Running scriptlet: gettext-0.19.8.1-17.el8.x86_64 11/106 + Installing : grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 12/106 + Installing : libwayland-cursor-1.21.0-1.el8.x86_64 13/106 + Installing : jasper-libs-2.0.14-5.el8.x86_64 14/106 + Installing : nss-softokn-freebl-3.90.0-7.el8_10.x86_64 15/106 + Installing : nss-softokn-3.90.0-7.el8_10.x86_64 16/106 + Installing : xkeyboard-config-2.28-1.el8.noarch 17/106 + Installing : libxkbcommon-0.9.1-1.el8.x86_64 18/106 + Installing : tzdata-java-2024a-1.0.1.el8.noarch 19/106 + Installing : ttmkfdir-3.0.9-54.el8.x86_64 20/106 + Installing : lua-5.3.4-12.el8.x86_64 21/106 + Installing : copy-jdk-configs-4.0-2.el8.noarch 22/106 + Installing : libwayland-egl-1.21.0-1.el8.x86_64 23/106 + Installing : libfontenc-1.1.3-8.el8.x86_64 24/106 + Installing : libepoxy-1.5.8-1.el8.x86_64 25/106 + Installing : libdatrie-0.2.9-7.el8.x86_64 26/106 + Running scriptlet: libdatrie-0.2.9-7.el8.x86_64 26/106 + Installing : libthai-0.1.27-2.el8.x86_64 27/106 + Running scriptlet: libthai-0.1.27-2.el8.x86_64 27/106 + 
Installing : libXau-1.0.9-3.el8.x86_64 28/106 + Installing : libxcb-1.13.1-1.el8.x86_64 29/106 + Installing : libX11-common-1.6.8-8.el8.noarch 30/106 + Installing : libX11-1.6.8-8.el8.x86_64 31/106 + Installing : libXext-1.3.4-1.el8.x86_64 32/106 + Installing : libXrender-0.9.10-7.el8.x86_64 33/106 + Installing : cairo-1.15.12-6.el8.x86_64 34/106 + Installing : libXi-1.7.10-1.el8.x86_64 35/106 + Installing : libXfixes-5.0.3-7.el8.x86_64 36/106 + Installing : libXtst-1.2.3-7.el8.x86_64 37/106 + Installing : libXcomposite-0.4.4-14.el8.x86_64 38/106 + Installing : at-spi2-core-2.28.0-1.el8.x86_64 39/106 + Running scriptlet: at-spi2-core-2.28.0-1.el8.x86_64 39/106 + Installing : at-spi2-atk-2.26.2-1.el8.x86_64 40/106 + Running scriptlet: at-spi2-atk-2.26.2-1.el8.x86_64 40/106 + Installing : libXcursor-1.1.15-3.el8.x86_64 41/106 + Installing : libXdamage-1.1.4-14.el8.x86_64 42/106 + Installing : cairo-gobject-1.15.12-6.el8.x86_64 43/106 + Installing : libXft-2.3.3-1.el8.x86_64 44/106 + Installing : libXrandr-1.5.2-1.el8.x86_64 45/106 + Installing : libXinerama-1.1.4-1.el8.x86_64 46/106 + Installing : lcms2-2.9-2.el8.x86_64 47/106 + Running scriptlet: lcms2-2.9-2.el8.x86_64 47/106 + Installing : jbigkit-libs-2.1-14.el8.x86_64 48/106 + Running scriptlet: jbigkit-libs-2.1-14.el8.x86_64 48/106 + Installing : libtiff-4.0.9-32.el8_10.x86_64 49/106 + Installing : javapackages-filesystem-5.3.0-1.module+el8+5136+ 50/106 + Installing : hicolor-icon-theme-0.17-2.el8.noarch 51/106 + Installing : graphite2-1.3.10-10.el8.x86_64 52/106 + Installing : harfbuzz-1.7.5-4.el8.x86_64 53/106 + Running scriptlet: harfbuzz-1.7.5-4.el8.x86_64 53/106 + Installing : fribidi-1.0.4-9.el8.x86_64 54/106 + Installing : pango-1.42.4-8.el8.x86_64 55/106 + Running scriptlet: pango-1.42.4-8.el8.x86_64 55/106 + Installing : dconf-0.28.0-4.0.1.el8.x86_64 56/106 + Installing : alsa-lib-1.2.10-2.el8.x86_64 57/106 + Running scriptlet: alsa-lib-1.2.10-2.el8.x86_64 57/106 + Installing : 
adwaita-cursor-theme-3.28.0-3.el8.noarch 58/106 + Installing : adwaita-icon-theme-3.28.0-3.el8.noarch 59/106 + Installing : abattis-cantarell-fonts-0.0.25-6.el8.noarch 60/106 + Installing : xz-5.2.4-4.el8_6.x86_64 61/106 + Installing : shared-mime-info-1.9-4.el8.x86_64 62/106 + Running scriptlet: shared-mime-info-1.9-4.el8.x86_64 62/106 + Installing : gdk-pixbuf2-2.36.12-6.el8_10.x86_64 63/106 + Running scriptlet: gdk-pixbuf2-2.36.12-6.el8_10.x86_64 63/106 + Installing : gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 64/106 + Installing : gtk-update-icon-cache-3.22.30-11.el8.x86_64 65/106 + Installing : pkgconf-m4-1.4.2-1.el8.noarch 66/106 + Installing : pigz-2.4-4.el8.x86_64 67/106 + Installing : memstrack-0.2.5-2.el8.x86_64 68/106 + Installing : lksctp-tools-1.0.18-3.el8.x86_64 69/106 + Running scriptlet: lksctp-tools-1.0.18-3.el8.x86_64 69/106 + Installing : libpkgconf-1.4.2-1.el8.x86_64 70/106 + Installing : pkgconf-1.4.2-1.el8.x86_64 71/106 + Installing : pkgconf-pkg-config-1.4.2-1.el8.x86_64 72/106 + Installing : xorg-x11-font-utils-1:7.5-41.el8.x86_64 73/106 + Installing : xorg-x11-fonts-Type1-7.5-19.el8.noarch 74/106 + Running scriptlet: xorg-x11-fonts-Type1-7.5-19.el8.noarch 74/106 + Installing : libmodman-2.0.1-17.el8.x86_64 75/106 + Running scriptlet: libmodman-2.0.1-17.el8.x86_64 75/106 + Installing : libproxy-0.4.15-5.2.el8.x86_64 76/106 + Running scriptlet: libproxy-0.4.15-5.2.el8.x86_64 76/106 + Installing : libkcapi-1.4.0-2.0.1.el8.x86_64 77/106 + Installing : libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 78/106 + Installing : libgusb-0.3.0-1.el8.x86_64 79/106 + Installing : colord-libs-1.4.2-1.el8.x86_64 80/106 + Installing : kbd-misc-2.0.4-11.el8.noarch 81/106 + Installing : kbd-legacy-2.0.4-11.el8.noarch 82/106 + Installing : kbd-2.0.4-11.el8.x86_64 83/106 + Installing : systemd-udev-239-78.0.4.el8.x86_64 84/106 + Running scriptlet: systemd-udev-239-78.0.4.el8.x86_64 84/106 + Installing : os-prober-1.74-9.0.1.el8.x86_64 85/106 + Installing : 
json-glib-1.4.4-1.el8.x86_64 86/106 + Installing : hardlink-1:1.3-6.el8.x86_64 87/106 + Installing : file-5.33-25.el8.x86_64 88/106 + Installing : dejavu-sans-mono-fonts-2.35-7.el8.noarch 89/106 + Installing : gsettings-desktop-schemas-3.32.0-6.el8.x86_64 90/106 + Installing : glib-networking-2.56.1-1.1.el8.x86_64 91/106 + Installing : libsoup-2.62.3-5.el8.x86_64 92/106 + Installing : rest-0.8.1-2.el8.x86_64 93/106 + Running scriptlet: rest-0.8.1-2.el8.x86_64 93/106 + Installing : cpio-2.12-11.el8.x86_64 94/106 + Installing : dracut-049-233.git20240115.0.1.el8.x86_64 95/106 + Running scriptlet: grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Installing : grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Running scriptlet: grub2-tools-1:2.02-156.0.2.el8.x86_64 96/106 + Installing : grubby-8.40-49.0.2.el8.x86_64 97/106 + Installing : crypto-policies-scripts-20230731-1.git3177e06.el 98/106 + Installing : nss-sysinit-3.90.0-7.el8_10.x86_64 99/106 + Installing : nss-3.90.0-7.el8_10.x86_64 100/106 + Installing : avahi-libs-0.7-27.el8.x86_64 101/106 + Installing : cups-libs-1:2.2.6-60.el8_10.x86_64 102/106 + Installing : java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 103/106 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 103/106 + Installing : gtk3-3.22.30-11.el8.x86_64 104/106 + Installing : java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 105/106 + Running scriptlet: java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 105/106 + Installing : java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: copy-jdk-configs-4.0-2.el8.noarch 106/106 + Running scriptlet: dconf-0.28.0-4.0.1.el8.x86_64 106/106 + Running scriptlet: crypto-policies-scripts-20230731-1.git3177e06.el 106/106 + Running scriptlet: nss-3.90.0-7.el8_10.x86_64 106/106 + Running scriptlet: java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 106/106 + Running scriptlet: 
java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 106/106 + Running scriptlet: java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 106/106 + Running scriptlet: hicolor-icon-theme-0.17-2.el8.noarch 106/106 + Running scriptlet: adwaita-icon-theme-3.28.0-3.el8.noarch 106/106 + Running scriptlet: shared-mime-info-1.9-4.el8.x86_64 106/106 + Running scriptlet: gdk-pixbuf2-2.36.12-6.el8_10.x86_64 106/106 + Running scriptlet: systemd-udev-239-78.0.4.el8.x86_64 106/106 + Verifying : avahi-libs-0.7-27.el8.x86_64 1/106 + Verifying : cpio-2.12-11.el8.x86_64 2/106 + Verifying : crypto-policies-scripts-20230731-1.git3177e06.el 3/106 + Verifying : cups-libs-1:2.2.6-60.el8_10.x86_64 4/106 + Verifying : dejavu-sans-mono-fonts-2.35-7.el8.noarch 5/106 + Verifying : dracut-049-233.git20240115.0.1.el8.x86_64 6/106 + Verifying : file-5.33-25.el8.x86_64 7/106 + Verifying : gdk-pixbuf2-2.36.12-6.el8_10.x86_64 8/106 + Verifying : gettext-0.19.8.1-17.el8.x86_64 9/106 + Verifying : gettext-libs-0.19.8.1-17.el8.x86_64 10/106 + Verifying : glib-networking-2.56.1-1.1.el8.x86_64 11/106 + Verifying : grub2-common-1:2.02-156.0.2.el8.noarch 12/106 + Verifying : grub2-tools-1:2.02-156.0.2.el8.x86_64 13/106 + Verifying : grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 14/106 + Verifying : grubby-8.40-49.0.2.el8.x86_64 15/106 + Verifying : gsettings-desktop-schemas-3.32.0-6.el8.x86_64 16/106 + Verifying : hardlink-1:1.3-6.el8.x86_64 17/106 + Verifying : json-glib-1.4.4-1.el8.x86_64 18/106 + Verifying : kbd-2.0.4-11.el8.x86_64 19/106 + Verifying : kbd-legacy-2.0.4-11.el8.noarch 20/106 + Verifying : kbd-misc-2.0.4-11.el8.noarch 21/106 + Verifying : libcroco-0.6.12-4.el8_2.1.x86_64 22/106 + Verifying : libgomp-8.5.0-22.0.1.el8_10.x86_64 23/106 + Verifying : libgusb-0.3.0-1.el8.x86_64 24/106 + Verifying : libkcapi-1.4.0-2.0.1.el8.x86_64 25/106 + Verifying : libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 26/106 + Verifying : libmodman-2.0.1-17.el8.x86_64 27/106 + Verifying : libpkgconf-1.4.2-1.el8.x86_64 28/106 + 
Verifying : libproxy-0.4.15-5.2.el8.x86_64 29/106 + Verifying : libsoup-2.62.3-5.el8.x86_64 30/106 + Verifying : lksctp-tools-1.0.18-3.el8.x86_64 31/106 + Verifying : memstrack-0.2.5-2.el8.x86_64 32/106 + Verifying : os-prober-1.74-9.0.1.el8.x86_64 33/106 + Verifying : pigz-2.4-4.el8.x86_64 34/106 + Verifying : pkgconf-1.4.2-1.el8.x86_64 35/106 + Verifying : pkgconf-m4-1.4.2-1.el8.noarch 36/106 + Verifying : pkgconf-pkg-config-1.4.2-1.el8.x86_64 37/106 + Verifying : shared-mime-info-1.9-4.el8.x86_64 38/106 + Verifying : systemd-udev-239-78.0.4.el8.x86_64 39/106 + Verifying : xz-5.2.4-4.el8_6.x86_64 40/106 + Verifying : abattis-cantarell-fonts-0.0.25-6.el8.noarch 41/106 + Verifying : adwaita-cursor-theme-3.28.0-3.el8.noarch 42/106 + Verifying : adwaita-icon-theme-3.28.0-3.el8.noarch 43/106 + Verifying : alsa-lib-1.2.10-2.el8.x86_64 44/106 + Verifying : at-spi2-atk-2.26.2-1.el8.x86_64 45/106 + Verifying : at-spi2-core-2.28.0-1.el8.x86_64 46/106 + Verifying : atk-2.28.1-1.el8.x86_64 47/106 + Verifying : cairo-1.15.12-6.el8.x86_64 48/106 + Verifying : cairo-gobject-1.15.12-6.el8.x86_64 49/106 + Verifying : colord-libs-1.4.2-1.el8.x86_64 50/106 + Verifying : copy-jdk-configs-4.0-2.el8.noarch 51/106 + Verifying : dconf-0.28.0-4.0.1.el8.x86_64 52/106 + Verifying : fribidi-1.0.4-9.el8.x86_64 53/106 + Verifying : gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 54/106 + Verifying : graphite2-1.3.10-10.el8.x86_64 55/106 + Verifying : gtk-update-icon-cache-3.22.30-11.el8.x86_64 56/106 + Verifying : gtk3-3.22.30-11.el8.x86_64 57/106 + Verifying : harfbuzz-1.7.5-4.el8.x86_64 58/106 + Verifying : hicolor-icon-theme-0.17-2.el8.noarch 59/106 + Verifying : jasper-libs-2.0.14-5.el8.x86_64 60/106 + Verifying : java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 61/106 + Verifying : java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x8 62/106 + Verifying : java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8 63/106 + Verifying : javapackages-filesystem-5.3.0-1.module+el8+5136+ 64/106 + Verifying : 
jbigkit-libs-2.1-14.el8.x86_64 65/106 + Verifying : lcms2-2.9-2.el8.x86_64 66/106 + Verifying : libX11-1.6.8-8.el8.x86_64 67/106 + Verifying : libX11-common-1.6.8-8.el8.noarch 68/106 + Verifying : libXau-1.0.9-3.el8.x86_64 69/106 + Verifying : libXcomposite-0.4.4-14.el8.x86_64 70/106 + Verifying : libXcursor-1.1.15-3.el8.x86_64 71/106 + Verifying : libXdamage-1.1.4-14.el8.x86_64 72/106 + Verifying : libXext-1.3.4-1.el8.x86_64 73/106 + Verifying : libXfixes-5.0.3-7.el8.x86_64 74/106 + Verifying : libXft-2.3.3-1.el8.x86_64 75/106 + Verifying : libXi-1.7.10-1.el8.x86_64 76/106 + Verifying : libXinerama-1.1.4-1.el8.x86_64 77/106 + Verifying : libXrandr-1.5.2-1.el8.x86_64 78/106 + Verifying : libXrender-0.9.10-7.el8.x86_64 79/106 + Verifying : libXtst-1.2.3-7.el8.x86_64 80/106 + Verifying : libdatrie-0.2.9-7.el8.x86_64 81/106 + Verifying : libepoxy-1.5.8-1.el8.x86_64 82/106 + Verifying : libfontenc-1.1.3-8.el8.x86_64 83/106 + Verifying : libjpeg-turbo-1.5.3-12.el8.x86_64 84/106 + Verifying : libthai-0.1.27-2.el8.x86_64 85/106 + Verifying : libtiff-4.0.9-32.el8_10.x86_64 86/106 + Verifying : libwayland-client-1.21.0-1.el8.x86_64 87/106 + Verifying : libwayland-cursor-1.21.0-1.el8.x86_64 88/106 + Verifying : libwayland-egl-1.21.0-1.el8.x86_64 89/106 + Verifying : libxcb-1.13.1-1.el8.x86_64 90/106 + Verifying : libxkbcommon-0.9.1-1.el8.x86_64 91/106 + Verifying : lua-5.3.4-12.el8.x86_64 92/106 + Verifying : nspr-4.35.0-1.el8_8.x86_64 93/106 + Verifying : nss-3.90.0-7.el8_10.x86_64 94/106 + Verifying : nss-softokn-3.90.0-7.el8_10.x86_64 95/106 + Verifying : nss-softokn-freebl-3.90.0-7.el8_10.x86_64 96/106 + Verifying : nss-sysinit-3.90.0-7.el8_10.x86_64 97/106 + Verifying : nss-util-3.90.0-7.el8_10.x86_64 98/106 + Verifying : pango-1.42.4-8.el8.x86_64 99/106 + Verifying : pixman-0.38.4-4.el8.x86_64 100/106 + Verifying : rest-0.8.1-2.el8.x86_64 101/106 + Verifying : ttmkfdir-3.0.9-54.el8.x86_64 102/106 + Verifying : tzdata-java-2024a-1.0.1.el8.noarch 103/106 + Verifying : 
xkeyboard-config-2.28-1.el8.noarch 104/106 + Verifying : xorg-x11-font-utils-1:7.5-41.el8.x86_64 105/106 + Verifying : xorg-x11-fonts-Type1-7.5-19.el8.noarch 106/106 + +Installed: + abattis-cantarell-fonts-0.0.25-6.el8.noarch + adwaita-cursor-theme-3.28.0-3.el8.noarch + adwaita-icon-theme-3.28.0-3.el8.noarch + alsa-lib-1.2.10-2.el8.x86_64 + at-spi2-atk-2.26.2-1.el8.x86_64 + at-spi2-core-2.28.0-1.el8.x86_64 + atk-2.28.1-1.el8.x86_64 + avahi-libs-0.7-27.el8.x86_64 + cairo-1.15.12-6.el8.x86_64 + cairo-gobject-1.15.12-6.el8.x86_64 + colord-libs-1.4.2-1.el8.x86_64 + copy-jdk-configs-4.0-2.el8.noarch + cpio-2.12-11.el8.x86_64 + crypto-policies-scripts-20230731-1.git3177e06.el8.noarch + cups-libs-1:2.2.6-60.el8_10.x86_64 + dconf-0.28.0-4.0.1.el8.x86_64 + dejavu-sans-mono-fonts-2.35-7.el8.noarch + dracut-049-233.git20240115.0.1.el8.x86_64 + file-5.33-25.el8.x86_64 + fribidi-1.0.4-9.el8.x86_64 + gdk-pixbuf2-2.36.12-6.el8_10.x86_64 + gdk-pixbuf2-modules-2.36.12-6.el8_10.x86_64 + gettext-0.19.8.1-17.el8.x86_64 + gettext-libs-0.19.8.1-17.el8.x86_64 + glib-networking-2.56.1-1.1.el8.x86_64 + graphite2-1.3.10-10.el8.x86_64 + grub2-common-1:2.02-156.0.2.el8.noarch + grub2-tools-1:2.02-156.0.2.el8.x86_64 + grub2-tools-minimal-1:2.02-156.0.2.el8.x86_64 + grubby-8.40-49.0.2.el8.x86_64 + gsettings-desktop-schemas-3.32.0-6.el8.x86_64 + gtk-update-icon-cache-3.22.30-11.el8.x86_64 + gtk3-3.22.30-11.el8.x86_64 + hardlink-1:1.3-6.el8.x86_64 + harfbuzz-1.7.5-4.el8.x86_64 + hicolor-icon-theme-0.17-2.el8.noarch + jasper-libs-2.0.14-5.el8.x86_64 + java-11-openjdk-1:11.0.24.0.8-3.0.1.el8.x86_64 + java-11-openjdk-devel-1:11.0.24.0.8-3.0.1.el8.x86_64 + java-11-openjdk-headless-1:11.0.24.0.8-3.0.1.el8.x86_64 + javapackages-filesystem-5.3.0-1.module+el8+5136+7ff78f74.noarch + jbigkit-libs-2.1-14.el8.x86_64 + json-glib-1.4.4-1.el8.x86_64 + kbd-2.0.4-11.el8.x86_64 + kbd-legacy-2.0.4-11.el8.noarch + kbd-misc-2.0.4-11.el8.noarch + lcms2-2.9-2.el8.x86_64 + libX11-1.6.8-8.el8.x86_64 + 
libX11-common-1.6.8-8.el8.noarch + libXau-1.0.9-3.el8.x86_64 + libXcomposite-0.4.4-14.el8.x86_64 + libXcursor-1.1.15-3.el8.x86_64 + libXdamage-1.1.4-14.el8.x86_64 + libXext-1.3.4-1.el8.x86_64 + libXfixes-5.0.3-7.el8.x86_64 + libXft-2.3.3-1.el8.x86_64 + libXi-1.7.10-1.el8.x86_64 + libXinerama-1.1.4-1.el8.x86_64 + libXrandr-1.5.2-1.el8.x86_64 + libXrender-0.9.10-7.el8.x86_64 + libXtst-1.2.3-7.el8.x86_64 + libcroco-0.6.12-4.el8_2.1.x86_64 + libdatrie-0.2.9-7.el8.x86_64 + libepoxy-1.5.8-1.el8.x86_64 + libfontenc-1.1.3-8.el8.x86_64 + libgomp-8.5.0-22.0.1.el8_10.x86_64 + libgusb-0.3.0-1.el8.x86_64 + libjpeg-turbo-1.5.3-12.el8.x86_64 + libkcapi-1.4.0-2.0.1.el8.x86_64 + libkcapi-hmaccalc-1.4.0-2.0.1.el8.x86_64 + libmodman-2.0.1-17.el8.x86_64 + libpkgconf-1.4.2-1.el8.x86_64 + libproxy-0.4.15-5.2.el8.x86_64 + libsoup-2.62.3-5.el8.x86_64 + libthai-0.1.27-2.el8.x86_64 + libtiff-4.0.9-32.el8_10.x86_64 + libwayland-client-1.21.0-1.el8.x86_64 + libwayland-cursor-1.21.0-1.el8.x86_64 + libwayland-egl-1.21.0-1.el8.x86_64 + libxcb-1.13.1-1.el8.x86_64 + libxkbcommon-0.9.1-1.el8.x86_64 + lksctp-tools-1.0.18-3.el8.x86_64 + lua-5.3.4-12.el8.x86_64 + memstrack-0.2.5-2.el8.x86_64 + nspr-4.35.0-1.el8_8.x86_64 + nss-3.90.0-7.el8_10.x86_64 + nss-softokn-3.90.0-7.el8_10.x86_64 + nss-softokn-freebl-3.90.0-7.el8_10.x86_64 + nss-sysinit-3.90.0-7.el8_10.x86_64 + nss-util-3.90.0-7.el8_10.x86_64 + os-prober-1.74-9.0.1.el8.x86_64 + pango-1.42.4-8.el8.x86_64 + pigz-2.4-4.el8.x86_64 + pixman-0.38.4-4.el8.x86_64 + pkgconf-1.4.2-1.el8.x86_64 + pkgconf-m4-1.4.2-1.el8.noarch + pkgconf-pkg-config-1.4.2-1.el8.x86_64 + rest-0.8.1-2.el8.x86_64 + shared-mime-info-1.9-4.el8.x86_64 + systemd-udev-239-78.0.4.el8.x86_64 + ttmkfdir-3.0.9-54.el8.x86_64 + tzdata-java-2024a-1.0.1.el8.noarch + xkeyboard-config-2.28-1.el8.noarch + xorg-x11-font-utils-1:7.5-41.el8.x86_64 + xorg-x11-fonts-Type1-7.5-19.el8.noarch + xz-5.2.4-4.el8_6.x86_64 + +Complete! 
+Last metadata expiration check: 0:00:23 ago on Tue 20 Aug 2024 08:55:14 AM UTC. +Package iproute-6.2.0-5.el8_9.x86_64 is already installed. +Dependencies resolved. +================================================================================ + Package Architecture Version Repository Size +================================================================================ +Upgrading: + iproute x86_64 6.2.0-6.el8_10 ol8_baseos_latest 853 k + +Transaction Summary +================================================================================ +Upgrade 1 Package + +Total download size: 853 k +Downloading Packages: +iproute-6.2.0-6.el8_10.x86_64.rpm 4.2 MB/s | 853 kB 00:00 +-------------------------------------------------------------------------------- +Total 4.2 MB/s | 853 kB 00:00 +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. +Running transaction + Preparing : 1/1 + Upgrading : iproute-6.2.0-6.el8_10.x86_64 1/2 + Cleanup : iproute-6.2.0-5.el8_9.x86_64 2/2 + Running scriptlet: iproute-6.2.0-5.el8_9.x86_64 2/2 + Verifying : iproute-6.2.0-6.el8_10.x86_64 1/2 + Verifying : iproute-6.2.0-5.el8_9.x86_64 2/2 + +Upgraded: + iproute-6.2.0-6.el8_10.x86_64 + +Complete! +24 files removed +Removing intermediate container fe168b01f3ad + ---> 791878694a50 +Step 5/12 : RUN curl -o /tmp/ords-$ORDSVERSION.el8.noarch.rpm https://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64/getPackage/ords-$ORDSVERSION.el8.noarch.rpm + ---> Running in 59d7143da358 + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 108M 100 108M 0 0 1440k 0 0:01:16 0:01:16 --:--:-- 1578k +Removing intermediate container 59d7143da358 + ---> 17c4534293e5 +Step 6/12 : RUN rpm -ivh /tmp/ords-$ORDSVERSION.el8.noarch.rpm + ---> Running in 84b1cbffdc51 +Verifying... ######################################## +Preparing... ######################################## +Updating / installing... 
+ords-23.4.0-8.el8 ######################################## +INFO: Before starting ORDS service, run the below command as user oracle: + ords --config /etc/ords/config install +Removing intermediate container 84b1cbffdc51 + ---> 6e7151b79588 +Step 7/12 : RUN mkdir -p $ORDS_HOME/doc_root && mkdir -p $ORDS_HOME/error && mkdir -p $ORDS_HOME/secrets && chmod ug+x $ORDS_HOME/*.sh && groupadd -g 54322 dba && usermod -u 54321 -d /home/oracle -g dba -m -s /bin/bash oracle && chown -R oracle:dba $ORDS_HOME && echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + ---> Running in 66e5db5f343f +Removing intermediate container 66e5db5f343f + ---> 0523dc897bf4 +Step 8/12 : USER oracle + ---> Running in ffda8495ac77 +Removing intermediate container ffda8495ac77 + ---> 162acd4d0b93 +Step 9/12 : WORKDIR /home/oracle + ---> Running in 8c14310ffbc7 +Removing intermediate container 8c14310ffbc7 + ---> c8dae809e772 +Step 10/12 : VOLUME ["$ORDS_HOME/config/ords"] + ---> Running in ed64548fd997 +Removing intermediate container ed64548fd997 + ---> 22e2c99247b0 +Step 11/12 : EXPOSE 8888 + ---> Running in 921f7c85d61d +Removing intermediate container 921f7c85d61d + ---> e5d503c92224 +Step 12/12 : CMD $ORDS_HOME/$RUN_FILE + ---> Running in cad487298d63 +Removing intermediate container cad487298d63 + ---> fdb17aa242f8 +Successfully built fdb17aa242f8 +Successfully tagged oracle/ords-dboper:latest +08:57:18 oracle@mitk01:# + diff --git a/docs/multitenant/usecase01/logfiles/ImagePush.log b/docs/multitenant/ords-based/usecase01/logfiles/ImagePush.log similarity index 100% rename from docs/multitenant/usecase01/logfiles/ImagePush.log rename to docs/multitenant/ords-based/usecase01/logfiles/ImagePush.log diff --git a/docs/multitenant/usecase01/logfiles/cdb.log b/docs/multitenant/ords-based/usecase01/logfiles/cdb.log similarity index 100% rename from docs/multitenant/usecase01/logfiles/cdb.log rename to docs/multitenant/ords-based/usecase01/logfiles/cdb.log diff --git 
a/docs/multitenant/usecase01/logfiles/cdb_creation.log b/docs/multitenant/ords-based/usecase01/logfiles/cdb_creation.log similarity index 100% rename from docs/multitenant/usecase01/logfiles/cdb_creation.log rename to docs/multitenant/ords-based/usecase01/logfiles/cdb_creation.log diff --git a/docs/multitenant/ords-based/usecase01/logfiles/openssl_execution.log b/docs/multitenant/ords-based/usecase01/logfiles/openssl_execution.log new file mode 100644 index 00000000..e3915a21 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/logfiles/openssl_execution.log @@ -0,0 +1,19 @@ +CREATING TLS CERTIFICATES +/usr/bin/openssl genrsa -out ca.key 2048 +Generating RSA private key, 2048 bit long modulus (2 primes) +......................+++++ +..................................................+++++ +e is 65537 (0x010001) +/usr/bin/openssl req -new -x509 -days 365 -key ca.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords.oracle-database-operator-system /CN=localhost Root CA " -out ca.crt +/usr/bin/openssl req -newkey rsa:2048 -nodes -keyout tls.key -subj "/C=US/ST=California/L=SanFrancisco/O=oracle /CN=cdb-dev-ords.oracle-database-operator-system /CN=localhost" -out server.csr +Generating a RSA private key +...........+++++ +...........................................+++++ +writing new private key to 'tls.key' +----- +/usr/bin/echo "subjectAltName=DNS:cdb-dev-ords.oracle-database-operator-system,DNS:www.example.com" > extfile.txt +/usr/bin/openssl x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt +Signature ok +subject=C = US, ST = California, L = SanFrancisco, O = "oracle ", CN = "cdb-dev-ords.oracle-database-operator-system ", CN = localhost +Getting CA Private Key + diff --git a/docs/multitenant/ords-based/usecase01/logfiles/ordsconfig.log b/docs/multitenant/ords-based/usecase01/logfiles/ordsconfig.log new file mode 100644 index 00000000..b787b752 --- /dev/null +++ 
b/docs/multitenant/ords-based/usecase01/logfiles/ordsconfig.log @@ -0,0 +1,39 @@ +ORDS: Release 23.4 Production on Tue Aug 20 07:48:44 2024 + +Copyright (c) 2010, 2024, Oracle. + +Configuration: + /etc/ords/config/ + +Database pool: default + +Setting Value Source +----------------------------------------- -------------------------------------------------- ----------- +database.api.enabled true Global +database.api.management.services.disabled false Global +db.cdb.adminUser C##DBAPI_CDB_ADMIN AS SYSDBA Pool +db.cdb.adminUser.password ****** Pool Wallet +db.connectionType customurl Pool +db.customURL jdbc:oracle:thin:@(DESCRIPTION=(CONNECT_TIMEOUT=90 Pool + )(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNEC + T_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL= + TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONL + Y))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST= + scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNEC + T_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) +db.password ****** Pool Wallet +db.serviceNameSuffix Pool +db.username ORDS_PUBLIC_USER Pool +error.externalPath /opt/oracle/ords/error Global +jdbc.InitialLimit 50 Pool +jdbc.MaxLimit 100 Pool +misc.pagination.maxRows 1000 Pool +plsql.gateway.mode disabled Pool +restEnabledSql.active true Pool +security.requestValidationFunction ords_util.authorize_plsql_gateway Pool +security.verifySSL true Global +standalone.access.log /home/oracle Global +standalone.https.cert /opt/oracle/ords//secrets/tls.crt Global +standalone.https.cert.key /opt/oracle/ords//secrets/tls.key Global +standalone.https.port 8888 Global + diff --git a/docs/multitenant/usecase01/logfiles/tagandpush.log b/docs/multitenant/ords-based/usecase01/logfiles/tagandpush.log similarity index 100% rename from docs/multitenant/usecase01/logfiles/tagandpush.log rename to docs/multitenant/ords-based/usecase01/logfiles/tagandpush.log diff --git a/docs/multitenant/usecase01/logfiles/testapi.log 
b/docs/multitenant/ords-based/usecase01/logfiles/testapi.log similarity index 100% rename from docs/multitenant/usecase01/logfiles/testapi.log rename to docs/multitenant/ords-based/usecase01/logfiles/testapi.log diff --git a/docs/multitenant/ords-based/usecase01/makefile b/docs/multitenant/ords-based/usecase01/makefile new file mode 100644 index 00000000..ec454e28 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/makefile @@ -0,0 +1,906 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. +# __ __ _ __ _ _ +# | \/ | __ _| | _____ / _(_) | ___ +# | |\/| |/ _` | |/ / _ \ |_| | |/ _ \ +# | | | | (_| | < __/ _| | | __/ +# |_| |_|\__,_|_|\_\___|_| |_|_|\___| +# | | | | ___| |_ __ ___ _ __ +# | |_| |/ _ \ | '_ \ / _ \ '__| +# | _ | __/ | |_) | __/ | +# |_| |_|\___|_| .__/ \___|_| +# |_| +# +# WARNING: Using this makefile helps you to customize yaml +# files. Edit parameters.txt with your enviroment +# informartion and execute the following steps +# +# 1) make operator +# it configures the operator yaml files with the +# watch namelist required by the multitenant controllers +# +# 2) make genyaml +# It automatically creates all the yaml files based on the +# information available in the parameters file +# +# 3) make secrets +# It configure the required secrets necessary to operate +# with pdbs multitenant controllers +# +# 4) make runall01 +# Start a series of operation create open close delete and so on +# +# LIST OF GENERAED YAML FILE +# +# ----------------------------- ---------------------------------- +# oracle-database-operator.yaml : oracle database operator +# cdbnamespace_binding.yaml : role binding for cdbnamespace +# pdbnamespace_binding.yaml : role binding for pdbnamespace +# create_cdb_secret.yaml : create secrets for ords server pod +# create_pdb_secret.yaml : create secrets for pluggable database +# create_ords_pod.yaml : create rest server 
pod +# create_pdb1_resource.yaml : create first pluggable database +# create_pdb2_resource.yaml : create second pluggable database +# open_pdb1_resource.yaml : open first pluggable database +# open_pdb2_resource.yaml : open second pluggable database +# close_pdb1_resource.yaml : close first pluggable database +# close_pdb2_resource.yaml : close second pluggable database +# clone_pdb_resource.yaml : clone thrid pluggable database +# clone_pdb2_resource.yaml : clone 4th pluggable database +# delete_pdb1_resource.yaml : delete first pluggable database +# delete_pdb2_resource.yaml : delete sencond pluggable database +# delete_pdb3_resource.yaml : delete thrid pluggable database +# unplug_pdb1_resource.yaml : unplug first pluggable database +# plug_pdb1_resource.yaml : plug first pluggable database +# map_pdb1_resource.yaml : map the first pluggable database +# config_map.yam : pdb parameters array +# +DATE := `date "+%y%m%d%H%M%S"` +###################### +# PARAMETER SECTIONS # +###################### + +export PARAMETERS=parameters.txt +export TNSALIAS=$(shell cat $(PARAMETERS) |grep -v ^\#|grep TNSALIAS|cut -d : -f 2) +export ORDPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep ORDPWD|cut -d : -f 2) +export SYSPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep SYSPWD|cut -d : -f 2) +export WBUSER=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBUSER|cut -d : -f 2) +export WBPASS=$(shell cat $(PARAMETERS)|grep -v ^\#|grep WBPASS|cut -d : -f 2) +export PDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBUSR|cut -d : -f 2) +export PDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep PDBPWD|cut -d : -f 2) +export CDBUSR=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBUSR|cut -d : -f 2) +export CDBPWD=$(shell cat $(PARAMETERS)|grep -v ^\#|grep CDBPWD|cut -d : -f 2) +export OPRNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep OPRNAMESPACE|cut -d : -f 2) +export OPRNAMESPACE=$(shell cat $(PARAMETERS)|grep -v ^\#|grep OPRNAMESPACE|cut -d : -f 2) +export ORDSIMG=$(shell cat 
$(PARAMETERS)|grep -v ^\#|grep ORDSIMG|cut -d : -f 2,3) +export COMPANY=$(shell cat $(PARAMETERS)|grep -v ^\#|grep COMPANY|cut -d : -f 2) +export APIVERSION=$(shell cat $(PARAMETERS)|grep -v ^\#|grep APIVERSION|cut -d : -f 2) +export OPRNAMESPACE=oracle-database-operator-system +export ORACLE_OPERATOR_YAML=../../../../oracle-database-operator.yaml +export TEST_EXEC_TIMEOUT=3m +export IMAGE=oracle/ords-dboper:latest +export ORDSIMGDIR=../../../../ords + +REST_SERVER=ords +SKEY=tls.key +SCRT=tls.crt +CART=ca.crt +PRVKEY=ca.key +PUBKEY=public.pem +COMPANY=oracle +RUNTIME=/usr/bin/podman + +################# +### FILE LIST ### +################# + +export ORDS_POD=create_ords_pod.yaml + +export CDB_SECRETS=create_cdb_secrets.yaml +export PDB_SECRETS=create_pdb_secrets.yaml + +export PDBCRE1=create_pdb1_resource.yaml +export PDBCRE2=create_pdb2_resource.yaml + +export PDBCLOSE1=close_pdb1_resource.yaml +export PDBCLOSE2=close_pdb2_resource.yaml +export PDBCLOSE3=close_pdb3_resource.yaml + +export PDBOPEN1=open_pdb1_resource.yaml +export PDBOPEN2=open_pdb2_resource.yaml +export PDBOPEN3=open_pdb3_resource.yaml + +export PDBCLONE1=clone_pdb1_resource.yaml +export PDBCLONE2=clone_pdb2_resource.yaml + +export PDBDELETE1=delete_pdb1_resource.yaml +export PDBDELETE2=delete_pdb2_resource.yaml +export PDBDELETE3=delete_pdb3_resource.yaml + +export PDBUNPLUG1=unplug_pdb1_resource.yaml +export PDBPLUG1=plug_pdb1_resource.yaml + +export PDBMAP1=map_pdb1_resource.yaml +export PDBMAP2=map_pdb2_resource.yaml +export PDBMAP3=map_pdb3_resource.yaml + +export PDBMAP1=map_pdb1_resource.yaml +export PDBMAP2=map_pdb2_resource.yaml +export PDBMAP3=map_pdb3_resource.yaml + + +##BINARIES +export KUBECTL=/usr/bin/kubectl +OPENSSL=/usr/bin/openssl +ECHO=/usr/bin/echo +RM=/usr/bin/rm +CP=/usr/bin/cp +TAR=/usr/bin/tar +MKDIR=/usr/bin/mkdir +SED=/usr/bin/sed + +define msg +@printf "\033[31;7m%s\033[0m\r" "......................................]" +@printf "\033[31;7m[\xF0\x9F\x91\x89 %s\033[0m\n" 
$(1) +endef + +check: + $(call msg,"CHECK PARAMETERS") + @printf "TNSALIAS...............:%.60s....\n" $(TNSALIAS) + @printf "ORDPWD.................:%s\n" $(ORDPWD) + @printf "SYSPWD.................:%s\n" $(SYSPWD) + @printf "WBUSER.................:%s\n" $(WBUSER) + @printf "WBPASS.................:%s\n" $(WBPASS) + @printf "PDBUSR.................:%s\n" $(PDBUSR) + @printf "PDBPWD.................:%s\n" $(PDBPWD) + @printf "CDBUSR.................:%s\n" $(CDBUSR) + @printf "CDBPWD.................:%s\n" $(CDBPWD) + @printf "OPRNAMESPACE...........:%s\n" $(OPRNAMESPACE) + @printf "COMPANY................:%s\n" $(COMPANY) + @printf "APIVERSION.............:%s\n" $(APIVERSION) + + +tlscrt: + $(call msg,"TLS GENERATION") + #$(OPENSSL) genrsa -out $(PRVKEY) 2048 + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > $(PRVKEY) + $(OPENSSL) req -new -x509 -days 365 -key $(PRVKEY) \ + -subj "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=$(COMPANY) Root CA" -out ca.crt + $(OPENSSL) req -newkey rsa:2048 -nodes -keyout $(SKEY) -subj \ + "/C=CN/ST=GD/L=SZ/O=$(COMPANY), Inc./CN=cdb-dev-$(REST_SERVER).$(OPRNAMESPACE)" -out server.csr + $(ECHO) "subjectAltName=DNS:cdb-dev-$(REST_SERVER).$(OPRNAMESPACE)" > extfile.txt + $(OPENSSL) x509 -req -extfile extfile.txt -days 365 -in server.csr -CA ca.crt -CAkey $(PRVKEY) -CAcreateserial -out $(SCRT) + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + +tlssec: + $(call msg,"GENERATE TLS SECRET") + $(KUBECTL) create secret tls db-tls --key="$(SKEY)" --cert="$(SCRT)" -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic db-ca --from-file="$(CART)" -n $(OPRNAMESPACE) + + +delsec: + $(call msg,"CLEAN OLD SECRETS") + $(eval SECRETSP:=$(shell kubectl get secrets -n $(OPRNAMESPACE) -o custom-columns=":metadata.name" --no-headers|grep -v webhook-server-cert) ) + @[ "${SECRETSP}" ] && ( \ + printf "Deleteing secrets in namespace -n $(OPRNAMESPACE)\n") &&\ + ($(KUBECTL) delete secret 
$(SECRETSP) -n $(OPRNAMESPACE))\ + || ( echo "No screts in namespace $(OPRNAMESPACE)") + + +###### ENCRYPTED SECRETS ###### +export PRVKEY=ca.key +export PUBKEY=public.pem +WBUSERFILE=wbuser.txt +WBPASSFILE=wbpass.txt +CDBUSRFILE=cdbusr.txt +CDBPWDFILE=cdbpwd.txt +SYSPWDFILE=syspwd.txt +ORDPWDFILE=ordpwd.txt +PDBUSRFILE=pdbusr.txt +PDBPWDFILE=pdbpwd.txt + + + +secrets: delsec tlscrt tlssec + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + $(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(OPRNAMESPACE) + @$(ECHO) $(WBUSER) > $(WBUSERFILE) + @$(ECHO) $(WBPASS) > $(WBPASSFILE) + @$(ECHO) $(CDBPWD) > $(CDBPWDFILE) + @$(ECHO) $(CDBUSR) > $(CDBUSRFILE) + @$(ECHO) $(SYSPWD) > $(SYSPWDFILE) + @$(ECHO) $(ORDPWD) > $(ORDPWDFILE) + @$(ECHO) $(PDBUSR) > $(PDBUSRFILE) + @$(ECHO) $(PDBPWD) > $(PDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBUSERFILE) |base64 > e_$(WBUSERFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(WBPASSFILE) |base64 > e_$(WBPASSFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(CDBPWDFILE) |base64 > e_$(CDBPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(CDBUSRFILE) |base64 > e_$(CDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SYSPWDFILE) |base64 > e_$(SYSPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(ORDPWDFILE) |base64 > e_$(ORDPWDFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBUSRFILE) |base64 > e_$(PDBUSRFILE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(PDBPWDFILE) |base64 > e_$(PDBPWDFILE) + $(KUBECTL) create secret generic wbuser --from-file=e_$(WBUSERFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic wbpass --from-file=e_$(WBPASSFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic cdbpwd --from-file=e_$(CDBPWDFILE) -n $(OPRNAMESPACE) + $(KUBECTL) 
create secret generic cdbusr --from-file=e_$(CDBUSRFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic syspwd --from-file=e_$(SYSPWDFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic ordpwd --from-file=e_$(ORDPWDFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic pdbusr --from-file=e_$(PDBUSRFILE) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic pdbpwd --from-file=e_$(PDBPWDFILE) -n $(OPRNAMESPACE) + $(RM) $(WBUSERFILE) $(WBPASSFILE) $(CDBPWDFILE) $(CDBUSRFILE) $(SYSPWDFILE) $(ORDPWDFILE) $(PDBUSRFILE) $(PDBPWDFILE) + $(RM) e_$(WBUSERFILE) e_$(WBPASSFILE) e_$(CDBPWDFILE) e_$(CDBUSRFILE) e_$(SYSPWDFILE) e_$(ORDPWDFILE) e_$(PDBUSRFILE) e_$(PDBPWDFILE) + + +### YAML FILE SECTION ### +operator: + $(CP) ${ORACLE_OPERATOR_YAML} . + ${CP} `basename ${ORACLE_OPERATOR_YAML}` `basename ${ORACLE_OPERATOR_YAML}`.ORG + $(SED) -i 's/value: ""/value: $(OPRNAMESPACE)/g' `basename ${ORACLE_OPERATOR_YAML}` + + +define _script00 +cat < authsection01.yaml + sysAdminPwd: + secret: + secretName: "syspwd" + key: "e_syspwd.txt" + ordsPwd: + secret: + secretName: "ordpwd" + key: "e_ordpwd.txt" + cdbAdminUser: + secret: + secretName: "cdbusr" + key: "e_cdbusr.txt" + cdbAdminPwd: + secret: + secretName: "cdbpwd" + key: "e_cdbpwd.txt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + cdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + cdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + cdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + +cat< authsection02.yaml + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" 
+ key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" +EOF + + +cat < ${OPRNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding1 + namespace: ${OPRNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +cat < ${OPRNAMESPACE}_binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: ${OPRNAMESPACE} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system +EOF + +endef +export script00 = $(value _script00) +secyaml: + @ eval "$$script00" + +#echo ords pod creation +define _script01 +cat < ${ORDS_POD} +apiVersion: database.oracle.com/${APIVERSION} +kind: CDB +metadata: + name: cdb-dev + namespace: oracle-database-operator-system +spec: + cdbName: "DB12" + ordsImage: ${ORDSIMG} + ordsImagePullPolicy: "Always" + dbTnsurl : ${TNSALIAS} + replicas: 1 + deletePdbCascade: true +EOF + +cat authsection01.yaml >> ${ORDS_POD} + +endef +export script01 = $(value _script01) + + +define _script02 + +cat <${PDBCRE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat 
< ${PDBCRE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + unlimitedStorage: false + tdeImport: false + totalSize: "2G" + tempSize: "800M" + action: "Create" +EOF + +cat <${PDBOPEN1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBOPEN2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBOPEN3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" +EOF + +cat <${PDBCLOSE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${PDBCLOSE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: 
"pdbprd" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat <${PDBCLOSE3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: ""new_clone" + pdbState: "CLOSE" + modifyOption: "IMMEDIATE" + action: "Modify" +EOF + +cat < ${PDBCLONE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" +EOF + +cat < ${PDBCLONE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb4 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone2" + srcPdbName: "pdbprd" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" +EOF + + +cat < ${PDBDELETE1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + pdbName: "pdbdev" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${PDBDELETE2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + pdbName: "pdbprd" + action: "Delete" + dropAction: "INCLUDING" +EOF + +cat < ${PDBUNPLUG1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: 
cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" +EOF + +cat <${PDBPLUG1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "plug" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertivePdbDeletion: true + action: "Plug" +EOF + +cat <${PDBMAP1} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb1 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + +cat <${PDBMAP2} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb2 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + + +cat <${PDBMAP3} +apiVersion: database.oracle.com/${APIVERSION} +kind: PDB +metadata: + name: pdb3 + namespace: ${OPRNAMESPACE} + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "${OPRNAMESPACE}" + cdbName: "DB12" + pdbName: "new_clone" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" +EOF + + +## Auth information +for _file in ${PDBCRE1} ${PDBCRE2} ${PDBOPEN1} ${PDBOPEN2} ${PDBOPEN3} ${PDBCLOSE1} ${PDBCLOSE2} ${PDBCLOSE3} ${PDBCLONE1} ${PDBCLONE2} ${PDBDELETE1} ${PDBDELETE2} ${PDBUNPLUG1} ${PDBPLUG1} 
${PDBMAP1} ${PDBMAP2} ${PDBMAP3} +do +ls -ltr ${_file} + cat authsection02.yaml >> ${_file} +done +rm authsection02.yaml +rm authsection01.yaml +endef + +export script02 = $(value _script02) + +genyaml: secyaml + @ eval "$$script01" + @ eval "$$script02" + +cleanyaml: + - $(RM) $(PDBMAP3) $(PDBMAP2) $(PDBMAP1) $(PDBPLUG1) $(PDBUNPLUG1) $(PDBDELETE2) $(PDBDELETE1) $(PDBCLONE2) $(PDBCLONE1) $(PDBCLOSE3) $(PDBCLOSE2) $(PDBCLOSE1) $(PDBOPEN3) $(PDBOPEN2) $(PDBOPEN1) $(PDBCRE2) $(PDBCRE1) $(ORDS_POD) $(CDB_SECRETS) $(PDB_SECRETS) + - $(RM) ${OPRNAMESPACE}_binding.yaml ${OPRNAMESPACE}_binding.yaml + + +cleancrt: + - $(RM) $(SKEY) $(SCRT) $(CART) $(PRVKEY) $(PUBKEY) server.csr extfile.txt ca.srl + + +################# +### PACKAGING ### +################# + +pkg: + - $(RM) -rf /tmp/pkgtestplan + $(MKDIR) /tmp/pkgtestplan + $(CP) -R * /tmp/pkgtestplan + $(CP) ../../../../oracle-database-operator.yaml /tmp/pkgtestplan/ + $(TAR) -C /tmp -cvf ~/pkgtestplan_$(DATE).tar pkgtestplan + +################ +### diag ### +################ + +login: + $(KUBECTL) exec `$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep ords|cut -d ' ' -f 1` -n $(OPRNAMESPACE) -it -- /bin/bash + + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + + +dump: + @$(eval TMPSP := $(shell date "+%y%m%d%H%M%S" )) + @$(eval DIAGFILE := ./opdmp.$(TMPSP)) + @>$(DIAGFILE) + @echo 
"OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + +####################################################### +#### TEST SECTION #### +####################################################### + +run00: + @$(call msg,"cdb pod creation") + - $(KUBECTL) delete cdb cdb-dev -n $(OPRNAMESPACE) + $(KUBECTL) apply -f $(ORDS_POD) + time $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" cdb cdb-dev -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"cdb pod completed") + $(KUBECTL) get cdb -n $(OPRNAMESPACE) + $(KUBECTL) get pod -n $(OPRNAMESPACE) + +run01.1: + @$(call msg,"pdb pdb1 creation") + $(KUBECTL) apply -f $(PDBCRE1) + time $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 creation completed") + $(KUBECTL) get pdb pdb1 -n $(OPRNAMESPACE) + +run01.2: + @$(call msg, "pdb pdb2 creation") + $(KUBECTL) apply -f $(PDBCRE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb2 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb2 creation completed") + $(KUBECTL) get pdb pdb2 -n $(OPRNAMESPACE) + +run02.1: + @$(call msg, "pdb pdb1 open") + $(KUBECTL) apply -f $(PDBOPEN1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 open completed") + $(KUBECTL) get pdb pdb1 -n $(OPRNAMESPACE) + +run02.2: + @$(call msg,"pdb pdb2 open") + 
$(KUBECTL) apply -f $(PDBOPEN2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="READ WRITE" pdb pdb2 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb2 open completed") + $(KUBECTL) get pdb pdb2 -n $(OPRNAMESPACE) + + +run03.1: + @$(call msg,"clone pdb1-->pdb3") + $(KUBECTL) apply -f $(PDBCLONE1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb3 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb1-->pdb3 completed") + $(KUBECTL) get pdb pdb3 -n $(OPRNAMESPACE) + + +run03.2: + @$(call msg,"clone pdb2-->pdb4") + $(KUBECTL) apply -f $(PDBCLONE2) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb4 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"clone pdb2-->pdb4 completed") + $(KUBECTL) get pdb pdb3 -n $(OPRNAMESPACE) + + +run04.1: + @$(call msg,"pdb pdb1 close") + $(KUBECTL) apply -f $(PDBCLOSE1) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 close completed") + $(KUBECTL) get pdb pdb1 -n $(OPRNAMESPACE) + +run04.2: + @$(call msg,"pdb pdb2 close") + $(KUBECTL) apply -f $(PDBCLOSE2) + $(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb2 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb2 close completed") + $(KUBECTL) get pdb pdb2 -n $(OPRNAMESPACE) + +run05.1: + @$(call msg,"pdb pdb1 unplug") + $(KUBECTL) apply -f $(PDBUNPLUG1) + $(KUBECTL) wait --for=delete pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb1 unplug completed") + +run06.1: + @$(call msg, "pdb pdb1 plug") + $(KUBECTL) apply -f $(PDBPLUG1) + $(KUBECTL) wait --for jsonpath='{.status.phase'}="Ready" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg, "pdb pdb1 plug completed") + $(KUBECTL) get pdb pdb1 -n $(OPRNAMESPACE) + +run07.1: + @$(call msg,"pdb pdb1 delete ") + - $(KUBECTL) apply -f $(PDBCLOSE1) + 
$(KUBECTL) wait --for jsonpath='{.status.openMode'}="MOUNTED" pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) apply -f $(PDBDELETE1) + $(KUBECTL) wait --for=delete pdb pdb1 -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + @$(call msg,"pdb pdb1 delete") + $(KUBECTL) get pdb -n $(OPRNAMESPACE) + +run99.1: + $(KUBECTL) delete cdb cdb-dev -n cdbnamespace + $(KUBECTL) wait --for=delete cdb cdb-dev -n $(OPRNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) + $(KUBECTL) get cdb -n cdbnamespaace + $(KUBECTL) get pdb -n pdbnamespaace + + +## SEQ | ACTION +## ----+---------------- +## 00 | create ords pod +## 01 | create pdb +## 02 | open pdb +## 03 | clone pdb +## 04 | close pdb +## 05 | unpug pdb +## 06 | plug pdb +## 07 | delete pdb (declarative) + + +runall01: run00 run01.1 run01.2 run03.1 run03.2 run04.1 run05.1 run06.1 run02.1 run07.1 + + +###### BUILD ORDS IMAGE ###### + +createimage: + $(RUNTIME) build -t $(IMAGE) $(ORDSIMGDIR) + +createimageproxy: + $(RUNTIME) build -t $(IMAGE) $(ORDSIMGDIR) --build-arg https_proxy=$(HTTPS_PROXY) --build-arg http_proxy=$(HTTP_PROXY) + +tagimage: + @echo "TAG IMAGE" + $(RUNTIME) tag $(IMAGE) $(ORDSIMG) + +push: + $(RUNTIME) push $(ORDSIMG) + + diff --git a/docs/multitenant/ords-based/usecase01/map_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/map_pdb1_resource.yaml new file mode 100644 index 00000000..18cb35b1 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/map_pdb1_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" 
+ pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/map_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/map_pdb2_resource.yaml new file mode 100644 index 00000000..85899597 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/map_pdb2_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbprd" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/map_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase01/map_pdb3_resource.yaml new file mode 100644 index 00000000..9c2c1cd3 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/map_pdb3_resource.yaml @@ -0,0 +1,49 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: oracle-database-operator-system + labels: + 
cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "new_clone" + assertivePdbDeletion: true + fileNameConversions: "NONE" + totalSize: "1G" + tempSize: "100M" + action: "Map" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/open_pdb1_resource.yaml b/docs/multitenant/ords-based/usecase01/open_pdb1_resource.yaml new file mode 100644 index 00000000..63a0a49c --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/open_pdb1_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbdev" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git 
a/docs/multitenant/ords-based/usecase01/open_pdb2_resource.yaml b/docs/multitenant/ords-based/usecase01/open_pdb2_resource.yaml new file mode 100644 index 00000000..8c4eed0d --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/open_pdb2_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb2 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "pdbprd" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/open_pdb3_resource.yaml b/docs/multitenant/ords-based/usecase01/open_pdb3_resource.yaml new file mode 100644 index 00000000..5f0e4b77 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/open_pdb3_resource.yaml @@ -0,0 +1,47 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: oracle-database-operator-system + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "oracle-database-operator-system" + cdbName: "DB12" + pdbName: "new_clone" + action: "Modify" + pdbState: "OPEN" + modifyOption: "READ WRITE" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: 
"tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase01/oracle-database-operator-system_binding.yaml b/docs/multitenant/ords-based/usecase01/oracle-database-operator-system_binding.yaml new file mode 100644 index 00000000..79e44269 --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/oracle-database-operator-system_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: oracle-database-operator-oracle-database-operator-manager-rolebinding2 + namespace: oracle-database-operator-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system diff --git a/docs/multitenant/usecase01/oracle-database-operator.yaml b/docs/multitenant/ords-based/usecase01/oracle-database-operator.yaml similarity index 100% rename from docs/multitenant/usecase01/oracle-database-operator.yaml rename to docs/multitenant/ords-based/usecase01/oracle-database-operator.yaml diff --git a/docs/multitenant/ords-based/usecase01/parameters.txt b/docs/multitenant/ords-based/usecase01/parameters.txt new file mode 100644 index 00000000..0a7b394a --- /dev/null +++ b/docs/multitenant/ords-based/usecase01/parameters.txt @@ -0,0 +1,61 @@ + +######################## +## REST SERVER IMAGE ### +######################## + +ORDSIMG:_your_container_registry/ords-dboper:latest + +############################## +## TNS URL FOR CDB CREATION ## +############################## +TNSALIAS:"T H I S I S J U S T A N E X A M P L E 
....(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS)))" + +########################################### +## ORDS PUBLIC USER ## +########################################### +ORDPWD:Change_me_please + +########################################### +## SYSPASSWORD ## +########################################### +SYSPWD:Change_me_please + +####################### +## HTTPS CREDENTIAL ### +####################### + +WBUSER:Change_me_please +WBPASS:Change_me_please + +##################### +## PDB ADMIN USER ### +##################### + +PDBUSR:Change_me_please +PDBPWD:Change_me_please + +##################### +## CDB ADMIN USER ### +##################### + +CDBUSR:C##DBAPI_CDB_ADMIN +CDBPWD:Change_me_please + +################### +### NAMESPACES #### +################### + +PDBNAMESPACE:pdbnamespace +CDBNAMESPACE:cdbnamespace + +#################### +### COMPANY NAME ### +#################### + +COMPANY:oracle + +#################### +### APIVERSION ### +#################### + +APIVERSION:v4 diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_close.yaml b/docs/multitenant/ords-based/usecase01/pdb_close.yaml similarity index 100% rename from docs/multitenant/provisioning/singlenamespace/pdb_close.yaml rename to docs/multitenant/ords-based/usecase01/pdb_close.yaml diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_create.yaml b/docs/multitenant/ords-based/usecase01/pdb_create.yaml similarity index 100% rename from docs/multitenant/provisioning/singlenamespace/pdb_create.yaml rename to docs/multitenant/ords-based/usecase01/pdb_create.yaml diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_delete.yaml b/docs/multitenant/ords-based/usecase01/pdb_delete.yaml 
similarity index 100% rename from docs/multitenant/provisioning/singlenamespace/pdb_delete.yaml rename to docs/multitenant/ords-based/usecase01/pdb_delete.yaml diff --git a/docs/multitenant/usecase01/pdb_map.yaml b/docs/multitenant/ords-based/usecase01/pdb_map.yaml similarity index 100% rename from docs/multitenant/usecase01/pdb_map.yaml rename to docs/multitenant/ords-based/usecase01/pdb_map.yaml diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_open.yaml b/docs/multitenant/ords-based/usecase01/pdb_open.yaml similarity index 100% rename from docs/multitenant/provisioning/singlenamespace/pdb_open.yaml rename to docs/multitenant/ords-based/usecase01/pdb_open.yaml diff --git a/docs/multitenant/usecase01/pdb_secret.yaml b/docs/multitenant/ords-based/usecase01/pdb_secret.yaml similarity index 100% rename from docs/multitenant/usecase01/pdb_secret.yaml rename to docs/multitenant/ords-based/usecase01/pdb_secret.yaml diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_plug.yaml b/docs/multitenant/ords-based/usecase01/plug_pdb1_resource.yaml similarity index 63% rename from docs/multitenant/provisioning/singlenamespace/pdb_plug.yaml rename to docs/multitenant/ords-based/usecase01/plug_pdb1_resource.yaml index 77c00b9c..0e86e10c 100644 --- a/docs/multitenant/provisioning/singlenamespace/pdb_plug.yaml +++ b/docs/multitenant/ords-based/usecase01/plug_pdb1_resource.yaml @@ -1,8 +1,4 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
-# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -15,13 +11,22 @@ spec: cdbName: "DB12" pdbName: "pdbdev" xmlFileName: "/tmp/pdb.xml" + action: "plug" fileNameConversions: "NONE" sourceFileNameConversions: "NONE" copyAction: "MOVE" totalSize: "1G" tempSize: "100M" - action: "Plug" assertivePdbDeletion: true + action: "Plug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" pdbTlsKey: secret: secretName: "db-tls" @@ -36,11 +41,13 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "wbuser" + key: "e_wbuser.txt" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" - - + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/usecase01/server.csr b/docs/multitenant/ords-based/usecase01/server.csr similarity index 100% rename from docs/multitenant/usecase01/server.csr rename to docs/multitenant/ords-based/usecase01/server.csr diff --git a/docs/multitenant/usecase01/tde_secret.yaml b/docs/multitenant/ords-based/usecase01/tde_secret.yaml similarity index 100% rename from docs/multitenant/usecase01/tde_secret.yaml rename to docs/multitenant/ords-based/usecase01/tde_secret.yaml diff --git a/docs/multitenant/usecase01/tls.crt b/docs/multitenant/ords-based/usecase01/tls.crt similarity index 100% rename from docs/multitenant/usecase01/tls.crt rename to docs/multitenant/ords-based/usecase01/tls.crt diff --git a/docs/multitenant/usecase01/tls.key b/docs/multitenant/ords-based/usecase01/tls.key similarity index 100% rename from docs/multitenant/usecase01/tls.key rename to docs/multitenant/ords-based/usecase01/tls.key diff --git a/docs/multitenant/provisioning/singlenamespace/pdb_unplug.yaml b/docs/multitenant/ords-based/usecase01/unplug_pdb1_resource.yaml similarity 
index 59% rename from docs/multitenant/provisioning/singlenamespace/pdb_unplug.yaml rename to docs/multitenant/ords-based/usecase01/unplug_pdb1_resource.yaml index 085d337e..61fe915d 100644 --- a/docs/multitenant/provisioning/singlenamespace/pdb_unplug.yaml +++ b/docs/multitenant/ords-based/usecase01/unplug_pdb1_resource.yaml @@ -1,8 +1,4 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 @@ -16,6 +12,14 @@ spec: pdbName: "pdbdev" xmlFileName: "/tmp/pdb.xml" action: "Unplug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" pdbTlsKey: secret: secretName: "db-tls" @@ -30,10 +34,13 @@ spec: key: "ca.crt" webServerUser: secret: - secretName: "pdb1-secret" - key: "webserver_user" + secretName: "wbuser" + key: "e_wbuser.txt" webServerPwd: secret: - secretName: "pdb1-secret" - key: "webserver_pwd" - + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/usecase02/README.md b/docs/multitenant/ords-based/usecase02/README.md similarity index 69% rename from docs/multitenant/usecase02/README.md rename to docs/multitenant/ords-based/usecase02/README.md index c434271f..39978747 100644 --- a/docs/multitenant/usecase02/README.md +++ b/docs/multitenant/ords-based/usecase02/README.md @@ -13,37 +13,7 @@ > ☞ The examples of this folder are based on single namespace **oracle-database-operator-system** -This page explains how to plug and unplug database a pdb; it assumes that you have already configured a pluggable database (see [usecase01](../usecase01/README.md)) -The following table reports the parameters required to configure and use oracle multi tenant controller for 
pluggable database lifecycle management. - -| yaml file parameters | value | description /ords parameter | -|-------------- |--------------------------- |-------------------------------------------------| -| dbserver | or | [--db-hostname][1] | -| dbTnsurl | | [--db-custom-url/db.customURL][dbtnsurl] | -| port | | [--db-port][2] | -| cdbName | | Container Name | -| name | | Ords podname prefix in cdb.yaml | -| name | | pdb resource in pdb.yaml | -| ordsImage | /ords-dboper:latest|My public container registry | -| pdbName | | Pluggable database name | -| servicename | | [--db-servicename][3] | -| sysadmin_user | | [--admin-user][adminuser] | -| sysadmin_pwd | | [--password-stdin][pwdstdin] | -| cdbadmin_user | | [db.cdb.adminUser][1] | -| cdbadmin_pwd | | [db.cdb.adminUser.password][cdbadminpwd] | -| webserver_user| | [https user][http] NOT A DB USER | -| webserver_pwd | | [http user password][http] | -| ords_pwd | | [ORDS_PUBLIC_USER password][public_user] | -| pdbTlsKey | | [standalone.https.cert.key][key] | -| pdbTlsCrt | | [standalone.https.cert][cr] | -| pdbTlsCat | | certificate authority | -| xmlFileName | | path for the unplug and plug operation | -| srcPdbName | | name of the database to be cloned | -| fileNameConversions | | used for database cloning | -| tdeKeystorePath | | [tdeKeystorePath][tdeKeystorePath] | -| tdeExport | | [tdeExport] | -| tdeSecret | | [tdeSecret][tdeSecret] | -| tdePassword | | [tdeSecret][tdeSecret] | +This page explains how to plug and unplug database a pdb; it assumes that you have already configured a pluggable database (see [usecase01](../usecase01/README.md)). Check yaml parameters in the CRD tables in the main [README](../README.md) file. 
```text @@ -127,27 +97,7 @@ spec: pdbName: "pdbdev" xmlFileName: "/tmp/pdbunplug.xml" action: "Unplug" - pdbTlsKey: - secret: - secretName: "db-tls" - key: "tls.key" - pdbTlsCrt: - secret: - secretName: "db-tls" - key: "tls.crt" - pdbTlsCat: - secret: - secretName: "db-ca" - key: "ca.crt" - webServerUser: - secret: - secretName: "pdb1-secret" - key: "webserver_user" - webServerPwd: - secret: - secretName: "pdb1-secret" - key: "webserver_pwd" - + [ secret sections ] ``` Close the pluggable database by applying the following yaml file **pdb_close.yaml** @@ -169,29 +119,10 @@ spec: cdbNamespace: "oracle-database-operator-system" cdbName: "DB12" pdbName: "pdbdev" - adminName: - secret: - secretName: "pdb1-secret" - key: "sysadmin_user" - adminPwd: - secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" - pdbTlsKey: - secret: - secretName: "db-tls" - key: "tls.key" - pdbTlsCrt: - secret: - secretName: "db-tls" - key: "tls.crt" - pdbTlsCat: - secret: - secretName: "db-ca" - key: "ca.crt" pdbState: "CLOSE" modifyOption: "IMMEDIATE" action: "Modify" + [secret section] ``` ```bash @@ -294,19 +225,7 @@ spec: totalSize: "1G" tempSize: "100M" action: "Plug" - pdbTlsKey: - secret: - secretName: "db-tls" - key: "tls.key" - pdbTlsCrt: - secret: - secretName: "db-tls" - key: "tls.crt" - pdbTlsCat: - secret: - secretName: "db-ca" - key: "ca.crt" - + [secrets section] ``` Apply **pdb_plug.yaml** @@ -390,27 +309,8 @@ spec: fileNameConversions: "NONE" totalSize: "UNLIMITED" tempSize: "UNLIMITED" - adminName: - secret: - secretName: "pdb1-secret" - key: "sysadmin_user" - adminPwd: - secret: - secretName: "pdb1-secret" - key: "sysadmin_pwd" - pdbTlsKey: - secret: - secretName: "db-tls" - key: "tls.key" - pdbTlsCrt: - secret: - secretName: "db-tls" - key: "tls.crt" - pdbTlsCat: - secret: - secretName: "db-ca" - key: "ca.crt" action: "Clone" + [secret section] ``` ```bash @@ -487,7 +387,7 @@ PDBDEV(3):Buffer Cache flush finished: 3 -You can use unplug and plug database with TDE; in 
order to do that you have to specify a key store path and create new kubernets secret for TDE using the following yaml file. **tde_secrete.yaml**. The procedure to unplug and plug database does not change apply the same file. +You can use unplug and plug database with TDE; in order to do that you have to specify a key store path and create new kubernets secret for TDE using the following yaml file. **tde_secrete.yaml**. ```yaml #tde_secret @@ -498,8 +398,8 @@ metadata: namespace: oracle-database-operator-system type: Opaque data: - tdepassword: "d2VsY29tZTEK" - tdesecret: "bW1hbHZlenoK" + tdepassword: "...." + tdesecret: "...." ``` ```bash @@ -525,7 +425,7 @@ spec: pdbName: "pdbdev" adminName: secret: - secretName: pdb1-secret + secretName: key: "sysadmin_user" adminPwd: secret: @@ -621,29 +521,3 @@ spec: -[1]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-E9625FAB-9BC8-468B-9FF9-443C88D76FA1:~:text=Table%202%2D2%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation - -[2]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-E9625FAB-9BC8-468B-9FF9-443C88D76FA1:~:text=Table%202%2D2%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation - -[3]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-DAA027FA-A4A6-43E1-B8DD-C92B330C2341:~:text=%2D%2Ddb%2Dservicename%20%3Cstring%3E - -[adminuser]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22:~:text=Table%202%2D6%20Command%20Options%20for%20Uninstall%20CLI - 
-[public_user]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/using-multitenant-architecture-oracle-rest-data-services.html#GUID-E64A141A-A71F-4979-8D33-C5F8496D3C19:~:text=Preinstallation%20Tasks%20for%20Oracle%20REST%20Data%20Services%20CDB%20Installation - -[key]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0:~:text=standalone.https.cert.key - -[cr]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0 - -[cdbadminpwd]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/about-REST-configuration-files.html#GUID-006F916B-8594-4A78-B500-BB85F35C12A0:~:text=Table%20C%2D1%20Oracle%20REST%20Data%20Services%20Configuration%20Settings - - -[pwdstdin]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-88479C84-CAC1-4133-A33E-7995A645EC05:~:text=default%20database%20pool.-,2.1.4.1%20Understanding%20Command%20Options%20for%20Command%2DLine%20Interface%20Installation,-Table%202%2D2 - -[http]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-BEECC057-A8F5-4EAB-B88E-9828C2809CD8:~:text=Example%3A%20delete%20%5B%2D%2Dglobal%5D-,user%20add,-Add%20a%20user - -[dbtnsurl]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/22.2/ordig/installing-and-configuring-oracle-rest-data-services.html#GUID-A9AED253-4EEC-4E13-A0C4-B7CE82EC1C22 - -[tdeKeystorePath]:https://docs.oracle.com/en/database/oracle/oracle-rest-data-services/21.4/orrst/op-database-pdbs-pdb_name-post.html - 
-[tdeSecret]:https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/ADMINISTER-KEY-MANAGEMENT.html#GUID-E5B2746F-19DC-4E94-83EC-A6A5C84A3EA9 diff --git a/docs/multitenant/ords-based/usecase02/pdb_clone.yaml b/docs/multitenant/ords-based/usecase02/pdb_clone.yaml new file mode 100644 index 00000000..5723f7c6 --- /dev/null +++ b/docs/multitenant/ords-based/usecase02/pdb_clone.yaml @@ -0,0 +1,50 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb3 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "new_clone" + srcPdbName: "pdbdev" + fileNameConversions: "NONE" + totalSize: "UNLIMITED" + tempSize: "UNLIMITED" + assertivePdbDeletion: true + action: "Clone" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/ords-based/usecase02/pdb_plug.yaml b/docs/multitenant/ords-based/usecase02/pdb_plug.yaml new file mode 100644 index 00000000..9eb5ed77 --- /dev/null +++ b/docs/multitenant/ords-based/usecase02/pdb_plug.yaml @@ -0,0 +1,53 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "plug" + fileNameConversions: "NONE" + sourceFileNameConversions: "NONE" + copyAction: "MOVE" + totalSize: "1G" + tempSize: "100M" + assertivePdbDeletion: true + 
action: "Plug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/usecase02/pdb_plugtde.yaml b/docs/multitenant/ords-based/usecase02/pdb_plugtde.yaml similarity index 96% rename from docs/multitenant/usecase02/pdb_plugtde.yaml rename to docs/multitenant/ords-based/usecase02/pdb_plugtde.yaml index 17d84346..995be538 100644 --- a/docs/multitenant/usecase02/pdb_plugtde.yaml +++ b/docs/multitenant/ords-based/usecase02/pdb_plugtde.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: PDB metadata: name: pdb1 diff --git a/docs/multitenant/ords-based/usecase02/pdb_unplug.yaml b/docs/multitenant/ords-based/usecase02/pdb_unplug.yaml new file mode 100644 index 00000000..0036d5f7 --- /dev/null +++ b/docs/multitenant/ords-based/usecase02/pdb_unplug.yaml @@ -0,0 +1,46 @@ +apiVersion: database.oracle.com/v4 +kind: PDB +metadata: + name: pdb1 + namespace: pdbnamespace + labels: + cdb: cdb-dev +spec: + cdbResName: "cdb-dev" + cdbNamespace: "cdbnamespace" + cdbName: "DB12" + pdbName: "pdbdev" + xmlFileName: "/tmp/pdb.xml" + action: "Unplug" + adminName: + secret: + secretName: "pdbusr" + key: "e_pdbusr.txt" + adminPwd: + secret: + secretName: "pdbpwd" + key: "e_pdbpwd.txt" + pdbTlsKey: + secret: + secretName: "db-tls" + key: "tls.key" + pdbTlsCrt: + secret: + secretName: "db-tls" + key: "tls.crt" + pdbTlsCat: + secret: + secretName: "db-ca" + key: "ca.crt" + webServerUser: + secret: + secretName: "wbuser" + key: "e_wbuser.txt" + webServerPwd: + secret: + secretName: "wbpass" + key: "e_wbpass.txt" + pdbOrdsPrvKey: + secret: + secretName: "prvkey" + key: "privateKey" diff --git a/docs/multitenant/usecase02/pdb_unplugtde.yaml b/docs/multitenant/ords-based/usecase02/pdb_unplugtde.yaml similarity index 96% rename from docs/multitenant/usecase02/pdb_unplugtde.yaml rename to docs/multitenant/ords-based/usecase02/pdb_unplugtde.yaml index 4c26bffe..2eacc5b7 100644 --- a/docs/multitenant/usecase02/pdb_unplugtde.yaml +++ b/docs/multitenant/ords-based/usecase02/pdb_unplugtde.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 Kind: PDB metadata: name: pdb1 diff --git a/docs/multitenant/usecase02/tde_secret.yaml b/docs/multitenant/usecase02/tde_secret.yaml deleted file mode 100644 index d0186ff2..00000000 --- a/docs/multitenant/usecase02/tde_secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# -# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. -# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. -# - -apiVersion: v1 -kind: Secret -metadata: - name: tde1-secret - namespace: oracle-database-operator-system -type: Opaque -data: - tdepassword: "[base64 encode value]" - tdesecret: "[base64 encode value]" - diff --git a/docs/observability/README.md b/docs/observability/README.md index 986b1885..5a281c9c 100644 --- a/docs/observability/README.md +++ b/docs/observability/README.md @@ -2,99 +2,155 @@ Oracle Database Operator for Kubernetes (`OraOperator`) includes the Observability controller for Oracle Databases and adds the `DatabaseObserver` CRD, which enables users to observe -Oracle Databases by scraping database metrics using SQL queries. The controller +Oracle Databases by scraping database metrics using SQL queries and observe logs in the Database _alert.log_. The controller automates the deployment and maintenance of the metrics exporter container image, -metrics exporter service and a Prometheus servicemonitor. +metrics exporter service and Prometheus servicemonitor. The following sections explains the configuration and functionality of the controller. 
* [Prerequisites](#prerequisites) * [The DatabaseObserver Custom Resource Definition](#the-databaseobserver-custom-resource) -* [Configuration of DatabaseObservers](#configuration) + * [Configuration Options](#configuration-options) + * [Resources Managed by the Controller](#resources-managed-by-the-controller) +* [DatabaseObserver Operations](#databaseobserver-operations) * [Create](#create-resource) * [List](#list-resource) * [Get Status](#get-detailed-status) * [Update](#patch-resource) * [Delete](#delete-resource) +* [Configuration Options for Scraping Metrics](#scraping-metrics) + * [Custom Metrics Config](#custom-metrics-config) + * [Prometheus Release](#prometheus-release) +* [Configuration Options for Scraping Logs](#scraping-logs) + * [Custom Log Location with PersistentVolumes](#custom-log-location-with-persistentvolumes) + * [Example Working with Sidecars and Promtail](#working-with-sidecars-to-deploy-promtail) + * [Promtail Config Example](#Promtail-Config-Example) +* [Other Configuration Options](#other-configuration-options) + * [Labels](#labels) + * [Custom Exporter Image or Version](#custom-exporter-image-or-version) * [Mandatory Roles and Privileges](#mandatory-roles-and-privileges-requirements-for-observability-controller) * [Debugging and troubleshooting](#debugging-and-troubleshooting) +* [Known Issues](#known-issues) ## Prerequisites -The `DatabaseObserver` custom resource has the following pre-requisites: +The `DatabaseObserver` custom resource has the following prerequisites: 1. Prometheus and its `servicemonitor` custom resource definition must be installed on the cluster. - The Observability controller creates multiple Kubernetes resources that include - a Prometheus `servicemonitor`. In order for the controller + a Prometheus `servicemonitor`. For the controller to create ServiceMonitors, the ServiceMonitor custom resource must exist. -2. A pre-existing Oracle Database and the proper database grants and privileges. +2. 
A preexisting Oracle Database and the proper database grants and privileges. - The controller exports metrics through SQL queries that the user can control and specify through a _toml_ file. The necessary access privileges to the tables used in the queries are not provided and applied automatically. -### The DatabaseObserver Custom Resource -The Oracle Database Operator (__v1.1.0__) includes the Oracle Database Observability controller which automates -the deployment and setting up of the Oracle Database metrics exporter and the related resources to make Oracle databases observable. - -In the sample YAML file found in -[./config/samples/observability/databaseobserver.yaml](../../config/samples/observability/databaseobserver.yaml), -the databaseObserver custom resource offers the following properties to be configured: - -| Attribute | Type | Default | Required? | Example | -|-------------------------------------------------------|---------|-----------------|--------------|-----------------------------------------------------------------------| -| `spec.database.dbUser.key` | string | user | Optional | _username_ | -| `spec.database.dbUser.secret` | string | - | Yes | _db-secret_ | -| `spec.database.dbPassword.key` | string | password | Optional | _admin-password_ | -| `spec.database.dbPassword.secret` | string | - | Conditional | _db-secret_ | -| `spec.database.dbPassword.vaultOCID` | string | - | Conditional | _ocid1.vault.oc1..._ | -| `spec.database.dbPassword.vaultSecretName` | string | - | Conditional | _db-vault_ | -| `spec.database.dbWallet.secret` | string | - | Conditional | _devsec-oradevdb-wallet_ | -| `spec.database.dbConnectionString.key` | string | connection | Optional | _connection_ | -| `spec.database.dbConnectionString.secret` | string | - | Yes | _db-secretg_ | -| `spec.exporter.image` | string | - | Optional | _container-registry.oracle.com/database/observability-exporter:1.0.2_ | -| `spec.exporter.configuration.configmap.key` | string | 
config.toml | Optional | _config.toml_ | -| `spec.exporter.configuration.configmap.configmapName` | string | - | Optional | _devcm-oradevdb-config_ | -| `spec.exporter.service.port` | number | 9161 | Optional | _9161_ | -| `spec.prometheus.port` | string | metrics | Optional | _metrics_ | -| `spec.prometheus.labels` | map | app: obs-{name} | Optional | _app: oradevdb-apps_ | -| `spec.replicas` | number | 1 | Optional | _1_ | -| `spec.ociConfig.configMapName` | string | - | Conditional | _oci-cred_ | -| `spec.ociConfig.secretName` | string | - | Conditional | _oci-privatekey_ | - - - - - -### Configuration -The `databaseObserver` custom resource has the following fields for all configurations that are required: -* `spec.database.dbUser.secret` - secret containing the database username. The corresponding key can be any value but must match the key in the secret provided. -* `spec.database.dbPassword.secret` - secret containing the database password (if vault is NOT used). The corresponding key field can be any value but must match the key in the secret provided -* `spec.database.dbConnectionString.secret` - secret containing the database connection string. The corresponding key field can be any value but must match the key in the secret provided i - -If a database wallet is required to connect, the following field containing the secret is required: -* `spec.database.dbWallet.secret` - secret containing the database wallet. The filenames must be used as the keys - -If vault is used to store the database password instead, the following fields are required: +## The DatabaseObserver Custom Resource +The Oracle Database Operator (__v1.2.0__ or later) includes the Oracle Database Observability controller, which automates +the deployment and setting up of the Oracle Database exporter and the related resources to make Oracle Databases observable. 
+ +In the example YAML file found in +[`./config/samples/observability/v4/databaseobserver.yaml`](../../config/samples/observability/v4/databaseobserver.yaml), +the databaseObserver custom resource provides the following configurable properties: + +| Attribute | Type | Default | Required? | Example | +|--------------------------------------------------------|--------|---------------------------------------------------------------------|:------------|-----------------------------------------------------------------------| +| `spec.database.dbUser.key` | string | user | Optional | _username_ | +| `spec.database.dbUser.secret` | string | - | Yes | _db-secret_ | +| `spec.database.dbPassword.key` | string | password | Optional | _admin-password_ | +| `spec.database.dbPassword.secret` | string | - | Conditional | _db-secret_ | +| `spec.database.dbPassword.vaultOCID` | string | - | Conditional | _ocid1.vault.oc1..._ | +| `spec.database.dbPassword.vaultSecretName` | string | - | Conditional | _db-vault_ | +| `spec.database.dbWallet.secret` | string | - | Conditional | _devsec-oradevdb-wallet_ | +| `spec.database.dbConnectionString.key` | string | connection | Optional | _connection_ | +| `spec.database.dbConnectionString.secret` | string | - | Yes | _db-secretg_ | +| `spec.sidecars` | array | - | Optional | - | +| `spec.sidecarVolumes` | array | - | Optional | - | +| `spec.exporter.deployment.securityContext` | object | | Optional | _ | +| `spec.exporter.deployment.env` | map | - | Optional | _DB_ROLE: "SYSDBA"_ | +| `spec.exporter.deployment.image` | string | container-registry.oracle.com/database/observability-exporter:1.5.1 | Optional | _container-registry.oracle.com/database/observability-exporter:1.3.0_ | +| `spec.exporter.deployment.args` | array | - | Optional | _[ "--log.level=info" ]_ | +| `spec.exporter.deployment.commands` | array | - | Optional | _[ "/oracledb_exporter" ]_ | +| `spec.exporter.deployment.labels` | map | - | Optional | _environment: dev_ | +| 
`spec.exporter.deployment.podTemplate.labels` | map | - | Optional | _environment: dev_ | +| `spec.exporter.deployment.podTemplate.securityContext` | object | - | Optional | _ | +| `spec.exporter.service.ports` | array | - | Optional | - | +| `spec.exporter.service.labels` | map | - | Optional | _environment: dev_ | | +| `spec.configuration.configMap.key` | string | config.toml | Optional | _config.toml_ | +| `spec.configuration.configMap.name` | string | - | Optional | _devcm-oradevdb-config_ | +| `spec.prometheus.serviceMonitor.labels` | map | - | Yes | _release: prometheus_ | +| `spec.prometheus.serviceMonitor.namespaceSelector` | - | - | Yes | - | +| `spec.prometheus.serviceMonitor.endpoints` | array | - | Optional | - | +| `spec.log.filename` | string | alert.log | Optional | _alert.log_ | +| `spec.log.path` | string | /log | Optional | _/log_ | +| `spec.log.volume.name` | string | log-volume | Optional | _my-persistent-volume_ | +| `spec.log.volume.persistentVolumeClaim.claimName` | string | - | Optional | _my-pvc_ | +| `spec.replicas` | number | 1 | Optional | _1_ | +| `spec.inheritLabels` | array | - | Optional | _- environment: dev_
- app.kubernetes.io/name: observer | +| `spec.ociConfig.configMapName` | string | - | Conditional | _oci-cred_ | +| `spec.ociConfig.secretName` | string | - | Conditional | _oci-privatekey_ | + + +### Configuration Options +The `databaseObserver` Custom resource has the following fields for all configurations that are required: +* `spec.database.dbUser.secret` - Secret containing the database username. The corresponding key can be any value but must match the key in the secret provided. +* `spec.database.dbPassword.secret` - Secret containing the database password (if `vault` is NOT used). The corresponding key field can be any value, but must match the key in the Secret provided +* `spec.database.dbConnectionString.secret` - Secret containing the database connection string. The corresponding key field can be any value but must match the key in the Secret provided +* `spec.prometheus.serviceMonitor.labels` - Custom labels to add to the service monitors labels. A label is required for your serviceMonitor to be discovered. This label must match what is set in the serviceMonitorSelector of your Prometheus configuration + +If a database wallet is required to connect, then the following field containing the wallet secret is required: +* `spec.database.dbWallet.secret` - Secret containing the database wallet. The filenames inside the wallet must be used as keys + +If vault is used to store the database password instead, then the following fields are required: * `spec.database.dbPassword.vaultOCID` - OCID of the vault used * `spec.database.dbPassword.vaultSecretName` - Name of the secret inside the desired vault -* `spec.ociConfig.configMapName` - holds the rest of the information of the OCI API signing key. The following keys must be used: `fingerprint`, `region`, `tenancy` and `user` -* `spec.ociConfig.secretName` - holds the private key of the OCI API signing key. 
The key to the file containing the user private key must be: `privatekey` - -The `databaseObserver` provides the remaining multiple fields that are optional: -* `spec.prometheus.labels` - labels to use for Service, ServiceMonitor and Deployment -* `spec.prometheus.port` - port to use for ServiceMonitor -* `spec.replicas` - number of replicas to deploy -* `spec.exporter.service.port` - port of service -* `spec.exporter.image` - image version of observability exporter to use - - +* `spec.ociConfig.configMapName` - Holds the rest of the information of the OCI API signing key. The following keys must be used: `fingerprint`, `region`, `tenancy` and `user` +* `spec.ociConfig.secretName` - Holds the private key of the OCI API signing key. The key to the file containing the user private key must be: `privatekey` + +The `databaseObserver` Resource provides the remaining multiple fields that are optional: +* `spec.prometheus.serviceMonitor.endpoints` - ServiceMonitor endpoints +* `spec.prometheus.serviceMonitor.namespaceSelector` - ServiceMonitor namespace selector +* `spec.sidecars` - List of containers to run as a sidecar container with the observability exporter container image +* `spec.sidecarVolumes` - Volumes of any sidecar containers +* `spec.log.path` - Custom path to create +* `spec.log.filename` - Custom filename for the log file +* `spec.log.volume.name` - Custom name for the log volume +* `spec.log.volume.persistentVolumeClaim.claimName` - A volume in which to place the log to be shared by the containers. If not specified, an EmptyDir is used by default. 
+* `spec.configuration.configMap.key` - Configuration filename inside the container and the configmap +* `spec.configuration.configMap.name` - Name of the `configMap` that holds the custom metrics configuration +* `spec.replicas` - Number of replicas to deploy +* `spec.exporter.service.ports` - Port number for the generated service to use +* `spec.exporter.service.labels` - Custom labels to add to service labels +* `spec.exporter.deployment.image` - Image version of observability exporter to use +* `spec.exporter.deployment.env` - Custom environment variables for the observability exporter +* `spec.exporter.deployment.labels` - Custom labels to add to deployment labels +* `spec.exporter.deployment.podTemplate.labels` - Custom labels to add to pod labels +* `spec.exporter.deployment.podTemplate.securityContext` - Configures pod securityContext +* `spec.exporter.deployment.args` - Additional arguments to provide the observability-exporter +* `spec.exporter.deployment.commands` - Commands to supply to the observability-exporter +* `spec.exporter.deployment.securityContext` - Configures container securityContext +* `spec.inheritLabels` - Keys of inherited labels from the databaseObserver resource. These labels are applied to generated resources. + +### Resources Managed by the Controller +When you create a `DatabaseObserver` resource, the controller creates and manages the following resources: + +1. __Deployment__ - The deployment will have the same name as the `databaseObserver` resource + - Deploys a container named `observability-exporter` + - The default container image version of the `container-registry.oracle.com/database/observability-exporter` supported is __[v1.5.1](https://github.com/oracle/oracle-db-appdev-monitoring/releases/tag/1.5.1)__ + +2. __Service__ - The service will have the same name as the databaseObserver + - The service is of type `ClusterIP` + +3. 
__Prometheus ServiceMonitor__ - The serviceMonitor will have the same name as the `databaseObserver` + +## DatabaseObserver Operations ### Create Resource -Follow the steps below to create a new databaseObserver resource object. +Follow the steps below to create a new `databaseObserver` resource object. -1. To begin, creating a databaseObserver requires you to create and provide kubernetes Secrets to provide connection details: +1. To begin, creating a `databaseObserver` requires you to create and provide Kubernetes Secrets to provide connection details: ```bash kubectl create secret generic db-secret \ --from-literal=username='username' \ @@ -102,24 +158,23 @@ kubectl create secret generic db-secret \ --from-literal=connection='dbsample_tp' ``` -2. (Conditional) Create a Kubernetes secret for the wallet (if a wallet is required to connect to the database). +2. (Conditional) Create a Kubernetes Secret for the wallet (if a wallet is required to connect to the database). -You can create this secret by using a command similar to the following example below. -If you are connecting to an Autunomous Database and the operator is used to manage the Oracle Autonomous Database, -a client wallet can also be downloaded as a secret through kubectl commands. You can find out how, [here](../../docs/adb/README.md#download-wallets). +You can create this Secret by using a command similar to the example that follows. +If you are connecting to an Autunomous Database, and the operator is used to manage the Oracle Autonomous Database, then a client wallet can also be downloaded as a Secret through `kubectl` commands. See the ADB README section on [Download Wallets](../../docs/adb/README.md#download-wallets). -Otherwise, you can create the wallet secret from a local directory containing the wallet files. +You can also choose to create the wallet secret from a local directory containing the wallet files: ```bash kubectl create secret generic db-wallet --from-file=wallet_dir ``` -3. 
Finally, update the databaseObserver manifest with the resources you have created. You can use the example manifest -inside config/samples/observability to specify and create your databaseObserver object with a +3. Finally, update the `databaseObserver` manifest with the resources you have created. You can use the example _minimal_ manifest +inside [config/samples/observability/v4](../../config/samples/observability/v4/databaseobserver_minimal.yaml) to specify and create your databaseObserver object with a YAML file. ```YAML # example -apiVersion: observability.oracle.com/v1alpha1 +apiVersion: observability.oracle.com/v4 kind: DatabaseObserver metadata: name: obs-sample @@ -139,6 +194,11 @@ spec: dbWallet: secret: db-wallet + + prometheus: + serviceMonitor: + labels: + release: prometheus ``` ```bash @@ -159,8 +219,8 @@ To obtain a quick status, use the following command as an example: ```sh $ kubectl get databaseobserver obs-sample -NAME EXPORTERCONFIG STATUS -obs-sample default READY +NAME EXPORTERCONFIG STATUS VERSION +obs-sample DEFAULT READY 1.5.1 ``` @@ -170,39 +230,323 @@ To obtain a more detailed status, use the following command as an example: kubectl describe databaseobserver obs-sample ``` -This provides details of the current state of your databaseObserver resource object. A successful -deployment of the databaseObserver resource object should display `READY` as the status and all conditions with a `True` -value for every ConditionType. +This command displays details of the current state of your `databaseObserver` resource object. A successful +deployment of the `databaseObserver` resource object should display `READY` as the status, and all conditions should display with a `True` value for every ConditionType. ### Patch Resource -The Observability controller currently supports updates for most of the fields in the manifest. 
An example of patching the databaseObserver resource is as follows: +The Observability controller currently supports updates for most of the fields in the manifest. The following is an example of patching the `databaseObserver` resource: ```bash -kubectl --type=merge -p '{"spec":{"exporter":{"image":"container-registry.oracle.com/database/observability-exporter:latest"}}}' patch databaseobserver obs-sample +kubectl --type=merge -p '{"spec":{"exporter":{"image":"container-registry.oracle.com/database/observability-exporter:1.5.0"}}}' patch databaseobserver obs-sample ``` -The fields listed below can be updated with the given example command: +### Delete Resource -* spec.exporter.image -* spec.exporter.configuration.configmap.configmapName -* spec.exporter.configuration.configmap.key -* spec.database.dbUser.secret -* spec.database.dbPassword.secret -* spec.database.dbConnectionString.secret -* spec.database.dbWallet.secret -* spec.ociConfig.configMapName -* spec.ociConfig.secretName -* spec.replicas -* spec.database.dbPassword.vaultOCID -* spec.database.dbPassword.vaultSecretName +To delete the `databaseObserver` custom resource and all related resources, use this command: +```bash +kubectl delete databaseobserver obs-sample +``` -### Delete Resource +## Scraping Metrics +The `databaseObserve`r resource deploys the Observability exporter container. This container connects to an Oracle Database and +scrapes metrics using SQL queries. By default, the exporter provides standard metrics, which are listed in the [official GitHub page of the Observability Exporter](https://github.com/oracle/oracle-db-appdev-monitoring?tab=readme-ov-file#standard-metrics). + +To define custom metrics in Oracle Database for scraping, a TOML file that lists your custom queries and properties is required. 
+The file will have metric sections with the following parts: +- a context +- a request, which contains the SQL query +- a map between the field(s) in the request and comment(s) + +For example, the code snippet that follows shows how you can define custom metrics: +```toml +[[metric]] +context = "test" +request = "SELECT 1 as value_1, 2 as value_2 FROM DUAL" +metricsdesc = { value_1 = "Simple example returning always 1.", value_2 = "Same but returning always 2." } +``` +This file produces the following entries: +``` +# HELP oracledb_test_value_1 Simple example returning always 1. +# TYPE oracledb_test_value_1 gauge +oracledb_test_value_1 1 +# HELP oracledb_test_value_2 Same but returning always 2. +# TYPE oracledb_test_value_2 gauge +oracledb_test_value_2 2 +``` -To delete the DatabaseObserver custom resource and all related resources: +You can find more information in the [__Custom Metrics__](https://github.com/oracle/oracle-db-appdev-monitoring?tab=readme-ov-file#custom-metrics) section of the Official GitHub page. + + +### Custom Metrics Config +When configuring a `databaseObserver` resource, you can use the field `spec.configuration.configMap` to provide a +custom metrics file as a `configMap`. + +You can create the `configMap` by running the following command: ```bash -kubectl delete databaseobserver obs-sample +kubectl create cm custom-metrics-cm --from-file=metrics.toml +``` + +Finally, when creating or updating a `databaseObserver` resource, if we assume using the example above, you can set the fields in your YAML file as follows: +```yaml +spec: + configuration: + configMap: + key: "metrics.toml" + name: "custom-metrics-cm" +``` + +### Prometheus Release +To enable your Prometheus configuration to find and include the `ServiceMonitor` created by the `databaseObserver` resource, the field `spec.prometheus.serviceMonitor.labels` is an __important__ and __required__ field. 
The label on the ServiceMonitor +must match the `spec.serviceMonitorSelector` field in your Prometheus configuration. + +```yaml + prometheus: + serviceMonitor: + labels: + release: stable +``` + +## Scraping Logs +Currently, the observability exporter provides the `alert.log` from Oracle Database, which provides important information about errors and exceptions during database operations. + +By default, the logs are stored in the pod filesystem, inside `/log/alert.log`. Note that the log can also be placed in a custom path with a custom filename, You can also place a volume available to multiple pods with the use of `PersistentVolumes` by specifying a `persistentVolumeClaim`. +Because the logs are stored in a file, scraping the logs must be pushed to a log aggregation system, such as _Loki_. +In the following example, `Promtail` is used as a sidecar container that ships the contents of local logs to the Loki instance. + + +To configure the `databaseObserver` resource with a sidecar, two fields can be used: +```yaml +spec: + sidecars: [] + sidecarVolumes: [] +``` + +You can find an example in the `samples` directory, which deploys a Promtail sidecar container as an example: +[`config/samples/observability/v4/databaseobserver_logs_promtail.yaml`](../../config/samples/observability/v4/databaseobserver_logs_promtail.yaml) + +### Custom Log Location with PersistentVolumes + +The fields `spec.log.filename` and `spec.log.path` enable you to configure a custom location and filename for the log. +Using a custom location enables you to control where to place the logfile, such as a `persistentVolume`. + +```yaml + log: + filename: "alert.log" + path: "/log" +``` + +To configure the `databaseObserver` resource to put the log file in a `persistentVolume`, you can set the following fields +in your `databaseObserver` YAML file. 
The field `spec.log.volume.name` is provided to control the name of the volume used +for the log, while the field `spec.log.volume.persistentVolumeClaim.claimName` is used to specify the claim to use. +These details can be used with any sidecar containers, or with other containers. + +If `spec.log.volume.persistentVolumeClaim.claimName` is not specified, then an `EmptyDir` volume is automatically used. + +> Important Note: the volume name must match all references of the volume, such as in any sidecar containers that use and mount this volume. + +```yaml + log: + volume: + name: my-log-volume + persistentVolumeClaim: + claimName: "my-pvc" +``` + +The security context defines privilege and access control settings for a pod container, If these privileges and access control settingrs need to be updated in the pod, then the same field is available on the `databaseObserver` spec. You can set this object under deployment: `spec.exporter.deployment.securityContext`. + +```yaml +spec: + exporter: + deployment: + runAsUser: 1000 +``` + +Configuring security context under the PodTemplate is also possible. You can set this object under: `spec.exporter.deployment.podTemplate.securityContext` + +```yaml +spec: + exporter: + deployment: + podTemplate: + securityContext: + supplementalGroups: [1000] +``` + + +### Working with Sidecars to deploy Promtail +The fields `spec.sidecars` and `spec.sidecarVolumes` provide the ability to deploy container images as a sidecar container +alongside the `observability-exporter` container. + +You can specify container images to deploy inside `spec.sidecars` as you would normally define a container in a deployment. The field +`spec.sidecars` is of an array of containers (`[]corev1.Container`). + +For example, to deploy a Grafana Promtail image, you can specify the container and its details as an element to the array, `spec.sidecars`. 
+```yaml + sidecars: + - name: promtail + image: grafana/promtail + args: + - -config.file=/etc/promtail/config.yaml + volumeMounts: + - name: promtail-config-volume + mountPath: /etc/promtail + - name: my-log-volume + mountPath: /log +``` + +> Important Note: Make sure the volumeMount name matches the actual name of the volumes referenced. In this case, `my-log-volume` is referenced in `spec.log.volume.name`. + +In the field `spec.sidecarVolumes`, you can specify and list the volumes you need in your sidecar containers. The field +`spec.sidecarVolumes` is an array of Volumes (`[]corev1.Volume`). + +For example, when deploying the Promtail container, you can specify in the field any volume that needs to be mounted in the sidecar container above. + +```yaml + sidecarVolumes: + - name: promtail-config-volume + configMap: + name: promtail-config-file +``` + +In this example, the `promtail-config-file` `configMap` contains the Promtail configuration, which specifies where to find +the target and the path to the file, as well as the endpoint where Loki is listening for any push API requests. + +__Promtail Config Example__ + +```yaml +# config.yaml +server: + http_listen_port: 9080 + grpc_listen_port: 0 +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://{loki-endpoint}:3100/loki/api/v1/push + +scrape_configs: + - job_name: "alert-log" + static_configs: + - targets: + - localhost + labels: + app: {my-database-observer-label} + __path__: /log/*.log + ``` + +To create the `configmap`, you can run the following command: +```bash +kubectl create cm promtail-config-file --from-file=config.yaml +``` + + +## Other Configuration Options + +### Labels + +__About the Default Label__ - The resources created by the Observability Controller will automatically be labelled with: +- `app`: `` + + +For example, if the `databaseObserver` instance is named: `metrics-exporter`, then resources such as the deployment will be labelled +with `app: metrics-exporter`. 
This label **cannot be overwritten**, because it is used by multiple of the resources created.
+ +```yaml +spec: + exporter: + service: + ports: + - name: metrics + port: 9161 + targetPort: 9161 + +``` + +### Custom ServiceMonitor Endpoints +The field `spec.prometheus.serviceMonitor.endpoints` is provided for providing custom endpoints for the ServiceMonitor resource created by the `databaseObserver`: + +```yaml +spec: + prometheus: + serviceMonitor: + endpoints: + - bearerTokenSecret: + key: '' + interval: 20s + port: metrics + relabelings: + - action: replace + sourceLabels: + - __meta_kubernetes_endpoints_label_app + targetLabel: instance ``` ## Mandatory roles and privileges requirements for Observability Controller @@ -231,17 +575,17 @@ and gets and lists configmaps and secrets. ## Debugging and troubleshooting ### Show the details of the resource -To get the verbose output of the current spec, use the command below: +To obtain the verbose output of the current spec, use the following command: ```sh kubectl describe databaseobserver/database-observer-sample ``` -If any error occurs during the reconciliation loop, the Operator either reports -the error using the resource's event stream, or will show the error under conditions. +If any error occurs during the reconciliation loop, then the Operator either reports +the error using the resource's event stream, or it will show the error under conditions. ### Check the logs of the pod where the operator deploys -Follow the steps to check the logs. +Follow these steps to check the logs. 1. List the pod replicas @@ -249,8 +593,18 @@ Follow the steps to check the logs. kubectl get pods -n oracle-database-operator-system ``` -2. Use the below command to check the logs of the deployment +2. 
Use the following command to check the logs of the deployment ```sh kubectl logs deployment.apps/oracle-database-operator-controller-manager -n oracle-database-operator-system ``` + +## Known Potential Issues + +| Issue | Example error | Potential Workaround | +|---------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------| +| Pod may encounter error Permission denied when creating log file. Pod cannot access file system due to insufficient permissions | ```level=error msg="Failed to create the log file: /log/alert.log"``` | Configure securityContext in the spec, add your group ID to the `supplementalgroups` inside `spec.exporter.deployment.podTemplate.securityContext` field. | + + +## Resources +- [GitHub - Unified Observability for Oracle Database Project](https://github.com/oracle/oracle-db-appdev-monitoring) diff --git a/docs/ordsservices/README.md b/docs/ordsservices/README.md new file mode 100644 index 00000000..1740e99f --- /dev/null +++ b/docs/ordsservices/README.md @@ -0,0 +1,67 @@ +# Oracle Rest Data Services (ORDSSRVS) Controller for Kubernetes - ORDS Life cycle management + + +## Description + +The ORDSRVS controller extends the Kubernetes API with a Custom Resource (CR) and Controller for automating Oracle Rest Data +Services (ORDS) lifecycle management. Using the ORDS controller, you can easily migrate existing, or create new, ORDS implementations +into an existing Kubernetes cluster. + +This controller allows you to run what would otherwise be an On-Premises ORDS middle-tier, configured as you require, inside Kubernetes with the additional ability of the controller to perform automatic ORDS/APEX install/upgrades inside the database. 
+ +## Features Summary + +The custom RestDataServices resource supports the following configurations as a Deployment, StatefulSet, or DaemonSet: + +* Single OrdsSrvs resource with one database pool +* Single OrdsSrvs resource with multiple database pools* +* Multiple OrdsSrvs resources, each with one database pool +* Multiple OrdsSrvs resources, each with multiple database pools* + +*See [Limitations](#limitations) + +It supports the majority of ORDS configuration settings as per the [API Documentation](./api.md). + +The ORDS and APEX schemas can be [automatically installed/upgraded](./autoupgrade.md) into the Oracle Database by the ORDS controller. + +ORDS Version support: +* v22.1+ + +Oracle Database Version: +* 19c +* 23ai (incl. 23ai Free) + + +### Common Configurations + +A few common configuration examples can be used to quickly familiarise yourself with the ORDS Custom Resource Definition. +The "Conclusion" section of each example highlights specific settings to enable functionality that maybe of interest. + +* [Containerised Single Instance Database using the Oracontroller](./examples/sidb_container.md) +* [Multipool, Multidatabase using a TNS Names file](./examples/multi_pool.md) +* [Autonomous Database using the Oracontroller](./examples/adb_oraoper.md) - (Customer Managed ORDS) *See [Limitations](#limitations) +* [Autonomous Database without the Oracontroller](./examples/adb.md) - (Customer Managed ORDS) +* [Oracle API for MongoDB Support](./examples/mongo_api.md) + +Running through all examples in the same Kubernetes cluster illustrates the ability to run multiple ORDS instances with a variety of different configurations. + +If you have a specific use-case that is not covered and would like it to be feel free to contribute it via a Pull Request. 
+ +### Limitations + +When connecting to a mTLS enabled ADB and using the Oracontroller to retreive the Wallet, it is currently not supported to have multiple, different databases supported by the single RestDataServices resource. This is due to a requirement to set the `TNS_ADMIN` parameter at the Pod level ([#97](https://github.com/oracle/oracle-database-controller/issues/97)). + +### Troubleshooting +See [Troubleshooting](./TROUBLESHOOTING.md) + +## Contributing +See [Contributing to this Repository](./CONTRIBUTING.md) + +## Reporting a Security Issue + +See [Reporting security vulnerabilities](./SECURITY.md) + +## License + +Copyright (c) 2025 Oracle and/or its affiliates. +Released under the Universal Permissive License v1.0 as shown at [https://oss.oracle.com/licenses/upl/](https://oss.oracle.com/licenses/upl/) diff --git a/docs/ordsservices/TROUBLESHOOTING.md b/docs/ordsservices/TROUBLESHOOTING.md new file mode 100644 index 00000000..b1b5304d --- /dev/null +++ b/docs/ordsservices/TROUBLESHOOTING.md @@ -0,0 +1,129 @@ + + + +## TROUBLESHOOTING + +### Init container error + +Check the pod status and verify the init outcome + +---- +*Command:* +```bash +kubectl get pods -n +``` + +*Example:* +```bash +kubectl get pods -n ordsnamespace +NAME READY STATUS RESTARTS AGE +ords-multi-pool-55db776994-7rrff 0/1 Init:CrashLoopBackOff 6 (61s ago) 12m +``` +In case of error identify the *initContainer* name + +---- +*Command:* +```bash +kubectl get pod -n -o="custom-columns=NAME:.metadata.name,INIT-CONTAINERS:.spec.initContainers[*].name,CONTAINERS:.spec.containers[*].name" +``` + +Use the initContainers info to dump log information +**Command:** +```bash +kubectl logs -f --since=0 -n -c +``` + +*Example:* + +In this particular case we are providing wrong credential: "SYT" user does not exist + +```text +kubectl logs -f --since=0 ords-multi-pool-55db776994-m7782 -n ordsnamespace -c ords-multi-pool-init + +[..omissis...] +Running SQL... 
+Picked up JAVA_TOOL_OPTIONS: -Doracle.ml.version_check=false +BACKTRACE [24:09:17 08:59:03] + +filename:line function +------------- -------- +/opt/oracle/sa/bin/init_script.sh:115 run_sql +/opt/oracle/sa/bin/init_script.sh:143 check_adb +/opt/oracle/sa/bin/init_script.sh:401 main +SQLERROR: + USER = SYT + URL = jdbc:oracle:thin:@PDB2 + Error Message = 🔥ORA-01017: invalid username/password;🔥 logon denied +Pool: pdb2, Exit Code: 1 +Pool: pdb1, Exit Code: 1 +``` + +--- +*Diag shell* Use the following script to dump the container init log + +```bash +#!/bin/bash +NAMESPACE=${1:-"ordsnamespace"} +KUBECTL=/usr/bin/kubectl +for _pod in `${KUBECTL} get pods --no-headers -o custom-columns=":metadata.name" --no-headers -n ${NAMESPACE}` +do + for _podinit in `${KUBECTL} get pod ${_pod} -n ${NAMESPACE} -o="custom-columns=INIT-CONTAINERS:.spec.initContainers[*].name" --no-headers` + do + echo "DUMPINIT ${_pod}:${_podinit}" + ${KUBECTL} logs -f --since=0 ${_pod} -n ${NAMESPACE} -c ${_podinit} + done +done +``` + +## Ords init error + +Get pod name + +*Command:* +```bash +kubectl get pods -n +``` + +*Example:* +``` +kubectl get pods -n ordsnamespace +NAME READY STATUS RESTARTS AGE +ords-multi-pool-55db776994-m7782 1/1 Running 0 2m51s +``` +---- +Dump ords log + +*Commands:* +```bash +kubectl logs --since=0 -n +``` +*Example:* +```text +kubectl logs --since=0 ords-multi-pool-55db776994-m7782 -n ordsnamespace +[..omissis..] +2024-09-17T09:47:39.227Z WARNING The pool named: |pdb2|lo| is invalid and will be ignored: ORDS was unable to make a connection to the database. The database user specified by db.username configuration setting is locked. The connection pool named: |pdb2|lo| had the following error(s): 🔥ORA-28000: The account is locked.🔥 + +2024-09-17T09:47:39.370Z WARNING The pool named: |pdb1|lo| is invalid and will be ignored: ORDS was unable to make a connection to the database. The database user specified by db.username configuration setting is locked. 
The connection pool named: |pdb1|lo| had the following error(s): 🔥ORA-28000: The account is locked.🔥 + +2024-09-17T09:47:39.375Z INFO + +Mapped local pools from /opt/oracle/sa/config/databases: + /ords/pdb1/ => pdb1 => INVALID + /ords/pdb2/ => pdb2 => INVALID + + +2024-09-17T09:47:39.420Z INFO Oracle REST Data Services initialized +Oracle REST Data Services version : 24.1.1.r1201228 +Oracle REST Data Services server info: jetty/10.0.20 +Oracle REST Data Services java info: Java HotSpot(TM) 64-Bit Server VM 11.0.15+8-LTS-149 +``` + +*Solution:* Connect to the container db to unlock the account + +```sql +alter user ORDS_PUBLIC_USER account unlock; +``` + + + + diff --git a/docs/ordsservices/api.md b/docs/ordsservices/api.md new file mode 100644 index 00000000..da4db09c --- /dev/null +++ b/docs/ordsservices/api.md @@ -0,0 +1,1388 @@ +# API Reference + +Packages: + +- [database.oracle.com/v1](#databaseoraclecomv1) + +# database.oracle.com/v1 + +Resource Types: + +- [OrdsSrvs](#ordssrvs) + + + + +## OrdsSrvs +[↩ Parent](#databaseoraclecomv1 ) + + + + + + +OrdsSrvs is the Schema for the ordssrvs API + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
apiVersionstringdatabase.oracle.com/v1true
kindstringOrdsSrvstrue
metadataobjectRefer to the Kubernetes API documentation for the fields of the `metadata` field.true
specobject + OrdsSrvsSpec defines the desired state of OrdsSrvs
+
false
statusobject + OrdsSrvsStatus defines the observed state of OrdsSrvs
+
false
+ + +### OrdsSrvs.spec +[↩ Parent](#ordssrvs) + + + +OrdsSrvsSpec defines the desired state of OrdsSrvs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
globalSettingsobject Contains settings that are configured across the entire +ORDS instance.
+
true
imagestring Specifies the ORDS container image
+
true
forceRestartboolean Specifies whether to restart pods when Global or Pool +configurations change
+
false
imagePullPolicyenum Specifies the ORDS container image pull policy
+
+Enum: IfNotPresent, Always, Never
+Default: IfNotPresent
+
false
imagePullSecretsstring Specifies the Secret Name for pulling the ORDS container +image
+
false
poolSettings< +a>[]object Contains settings for individual pools/databases
+
false
replicasinteger Defines the number of desired Replicas when workloadType +Deployment or StatefulSet
+
+Format: int32
+Default: 1
+Minimum: 1
+
false
workloadTypeenum Specifies the desired Kubernetes Workload
+
+Enum: Deployment, StatefulSet, DaemonSet
+Default: Deployment
+
false
encPrivKey
+
secret
+
secretName: string  passwordKey: +string Define the private key to decrypt passwords
+
true
+
+ +### OrdsSrvs.spec.globalSettings +[↩ Parent](#ordssrvsspec) + + + +Contains settings that are configured across the entire ORDS instance. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
cache.metadata.enabledboolean + Specifies the setting to enable or disable metadata caching.
+
false
cache.metadata.graphql.expireAfterAccessinteger + Specifies the duration after a GraphQL schema is not accessed from the cache that it expires.
+
+ Format: int64
+
false
cache.metadata.graphql.expireAfterWriteinteger + Specifies the duration after a GraphQL schema is cached that it expires and has to be loaded again.
+
+ Format: int64
+
false
cache.metadata.jwks.enabledboolean + Specifies the setting to enable or disable JWKS caching.
+
false
cache.metadata.jwks.expireAfterAccessinteger + Specifies the duration after a JWK is not accessed from the cache that it expires. By default this is disabled.
+
+ Format: int64
+
false
cache.metadata.jwks.expireAfterWriteinteger + Specifies the duration after a JWK is cached, that is, it expires and has to be loaded again.
+
+ Format: int64
+
false
cache.metadata.jwks.initialCapacityinteger + Specifies the initial capacity of the JWKS cache.
+
+ Format: int32
+
false
cache.metadata.jwks.maximumSizeinteger + Specifies the maximum capacity of the JWKS cache.
+
+ Format: int32
+
false
cache.metadata.timeoutinteger + Specifies the setting to determine for how long a metadata record remains in the cache. Longer duration means, it takes longer to view the applied changes. The formats accepted are based on the ISO-8601 duration format.
+
+ Format: int64
+
false
certSecretobject + Specifies the Secret containing the SSL Certificates Replaces: standalone.https.cert and standalone.https.cert.key
+
false
database.api.enabledboolean + Specifies whether the Database API is enabled.
+
false
database.api.management.services.disabledboolean + Specifies to disable the Database API administration related services. Only applicable when Database API is enabled.
+
false
db.invalidPoolTimeoutinteger + Specifies how long to wait before retrying an invalid pool.
+
+ Format: int64
+
false
debug.printDebugToScreenboolean + Specifies whether to display error messages on the browser.
+
false
enable.mongo.access.logboolean + Specifies if HTTP request access logs should be enabled If enabled, logs will be written to /opt/oracle/sa/log/global
+
+ Default: false
+
false
enable.standalone.access.logboolean + Specifies if HTTP request access logs should be enabled If enabled, logs will be written to /opt/oracle/sa/log/global
+
+ Default: false
+
false
error.responseFormatstring + Specifies how the HTTP error responses must be formatted. html - Force all responses to be in HTML format json - Force all responses to be in JSON format auto - Automatically determines most appropriate format for the request (default).
+
false
feature.grahpql.max.nesting.depthinteger + Specifies the maximum join nesting depth limit for GraphQL queries.
+
+ Format: int32
+
false
icap.portinteger + Specifies the Internet Content Adaptation Protocol (ICAP) port to virus scan files. Either icap.port or icap.secure.port are required to have a value.
+
+ Format: int32
+
false
icap.secure.portinteger + Specifies the Internet Content Adaptation Protocol (ICAP) port to virus scan files. Either icap.port or icap.secure.port are required to have a value. If values for both icap.port and icap.secure.port are provided, then the value of icap.port is ignored.
+
+ Format: int32
+
false
icap.serverstring + Specifies the Internet Content Adaptation Protocol (ICAP) server name or IP address to virus scan files. The icap.server is required to have a value.
+
false
log.procedureboolean + Specifies whether procedures are to be logged.
+
false
mongo.enabledboolean + Specifies to enable the API for MongoDB.
+
false
mongo.idle.timeoutinteger + Specifies the maximum idle time for a Mongo connection in milliseconds.
+
+ Format: int64
+
false
mongo.op.timeoutinteger + Specifies the maximum time for a Mongo database operation in milliseconds.
+
+ Format: int64
+
false
mongo.portinteger + Specifies the API for MongoDB listen port.
+
+ Format: int32
+ Default: 27017
+
false
request.traceHeaderNamestring + Specifies the name of the HTTP request header that uniquely identifies the request end to end as it passes through the various layers of the application stack. In Oracle this header is commonly referred to as the ECID (Entity Context ID).
+
false
security.credentials.attemptsinteger + Specifies the maximum number of unsuccessful password attempts allowed. Enabled by setting a positive integer value.
+
+ Format: int32
+
false
security.credentials.lock.timeinteger + Specifies the period to lock the account that has exceeded maximum attempts.
+
+ Format: int64
+
false
security.disableDefaultExclusionListboolean + If this value is set to true, then the Oracle REST Data Services internal exclusion list is not enforced. Oracle recommends that you do not set this value to true.
+
false
security.exclusionListstring + Specifies a pattern for procedures, packages, or schema names which are forbidden to be directly executed from a browser.
+
false
security.externalSessionTrustedOriginsstring + Specifies to trust Access from originating domains
+
false
security.forceHTTPSboolean + Specifies to force HTTPS; this is set to default to false as in real-world TLS should terminiate at the LoadBalancer
+
false
security.httpsHeaderCheckstring + Specifies that the HTTP Header contains the specified text Usually set to 'X-Forwarded-Proto: https' coming from a load-balancer
+
false
security.inclusionListstring + Specifies a pattern for procedures, packages, or schema names which are allowed to be directly executed from a browser.
+
false
security.maxEntriesinteger + Specifies the maximum number of cached procedure validations. Set this value to 0 to force the validation procedure to be invoked on each request.
+
+ Format: int32
+
false
security.verifySSLboolean + Specifies whether HTTPS is available in your environment.
+
false
standalone.context.pathstring + Specifies the context path where ords is located.
+
+ Default: /ords
+
false
standalone.http.portinteger + Specifies the HTTP listen port.
+
+ Format: int32
+ Default: 8080
+
false
standalone.https.hoststring + Specifies the SSL certificate hostname.
+
false
standalone.https.portinteger + Specifies the HTTPS listen port.
+
+ Format: int32
+ Default: 8443
+
false
standalone.stop.timeoutinteger + Specifies the period for Standalone Mode to wait until it is gracefully shutdown.
+
+ Format: int64
+
false
+ + +### OrdsSrvs.spec.globalSettings.certSecret +[↩ Parent](#ordssrvsspecglobalsettings) + + + +Specifies the Secret containing the SSL Certificates Replaces: standalone.https.cert and standalone.https.cert.key + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
certstring + Specifies the Certificate
+
true
keystring + Specifies the Certificate Key
+
true
secretNamestring + Specifies the name of the certificate Secret
+
true
+ + +### OrdsSrvs.spec.poolSettings[index] +[↩ Parent](#ordssrvsspec) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
db.secretobject + Specifies the Secret with the dbUsername and dbPassword values for the connection.
+
true
poolNamestring + Specifies the Pool Name
+
true
apex.security.administrator.rolesstring + Specifies the comma delimited list of additional roles to assign authenticated APEX administrator type users.
+
false
apex.security.user.rolesstring + Specifies the comma delimited list of additional roles to assign authenticated regular APEX users.
+
false
autoUpgradeAPEXboolean + Specify whether to perform APEX installation/upgrades automatically The db.adminUser and db.adminUser.secret must be set, otherwise setting is ignored This setting will be ignored for ADB
+
+ Default: false
+
false
autoUpgradeORDSboolean + Specify whether to perform ORDS installation/upgrades automatically The db.adminUser and db.adminUser.secret must be set, otherwise setting is ignored This setting will be ignored for ADB
+
+ Default: false
+
false
db.adminUserstring + Specifies the username for the database account that ORDS uses for administration operations in the database.
+
false
db.adminUser.secretobject + Specifies the Secret with the dbAdminUser (SYS) and dbAdminPassword values for the database account that ORDS uses for administration operations in the database. replaces: db.adminUser.password
+
false
db.cdb.adminUserstring + Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management.
+
false
db.cdb.adminUser.secretobject + Specifies the Secret with the dbCdbAdminUser (SYS) and dbCdbAdminPassword values Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management. Replaces: db.cdb.adminUser.password
+
false
db.connectionTypeenum + The type of connection.
+
+ Enum: basic, tns, customurl
+
false
db.credentialsSourceenum + Specifies the source for database credentials when creating a direct connection for running SQL statements. Value can be one of pool or request. If the value is pool, then the credentials defined in this pool is used to create a JDBC connection. If the value request is used, then the credentials in the request is used to create a JDBC connection and if successful, grants the requestor SQL Developer role.
+
+ Enum: pool, request
+
false
db.customURLstring + Specifies the JDBC URL connection to connect to the database.
+
false
db.hostnamestring + Specifies the host system for the Oracle database.
+
false
db.poolDestroyTimeoutinteger + Indicates how long to wait to gracefully destroy a pool before moving to forcefully destroy all connections including borrowed ones.
+
+ Format: int64
+
false
db.portinteger + Specifies the database listener port.
+
+ Format: int32
+
false
db.servicenamestring + Specifies the network service name of the database.
+
false
db.sidstring + Specifies the name of the database.
+
false
db.tnsAliasNamestring + Specifies the TNS alias name that matches the name in the tnsnames.ora file.
+
false
db.usernamestring + Specifies the name of the database user for the connection. For non-ADB this will default to ORDS_PUBLIC_USER For ADBs this must be specified and not ORDS_PUBLIC_USER If ORDS_PUBLIC_USER is specified for an ADB, the workload will fail
+
+ Default: ORDS_PUBLIC_USER
+
false
db.wallet.zip.servicestring + Specifies the service name in the wallet archive for the pool.
+
false
dbWalletSecretobject + Specifies the Secret containing the wallet archive containing connection details for the pool. Replaces: db.wallet.zip
+
false
debug.trackResourcesboolean + Specifies to enable tracking of JDBC resources. If not released causes in resource leaks or exhaustion in the database. Tracking imposes a performance overhead.
+
false
feature.openservicebroker.excludeboolean + Specifies to disable the Open Service Broker services available for the pool.
+
false
feature.sdwboolean + Specifies to enable the Database Actions feature.
+
false
http.cookie.filterstring + Specifies a comma separated list of HTTP Cookies to exclude when initializing an Oracle Web Agent environment.
+
false
jdbc.DriverTypeenum + Specifies the JDBC driver type.
+
+ Enum: thin, oci8
+
false
jdbc.InactivityTimeoutinteger + Specifies how long an available connection can remain idle before it is closed. The inactivity connection timeout is in seconds.
+
+ Format: int32
+
false
jdbc.InitialLimitinteger + Specifies the initial size for the number of connections that will be created. The default is low, and should probably be set higher in most production environments.
+
+ Format: int32
+
false
jdbc.MaxConnectionReuseCountinteger + Specifies the maximum number of times to reuse a connection before it is discarded and replaced with a new connection.
+
+ Format: int32
+
false
jdbc.MaxConnectionReuseTimeinteger + Sets the maximum connection reuse time property.
+
+ Format: int32
+
false
jdbc.MaxLimitinteger + Specifies the maximum number of connections. Might be too low for some production environments.
+
+ Format: int32
+
false
jdbc.MaxStatementsLimitinteger + Specifies the maximum number of statements to cache for each connection.
+
+ Format: int32
+
false
jdbc.MinLimitinteger + Specifies the minimum number of connections.
+
+ Format: int32
+
false
jdbc.SecondsToTrustIdleConnectioninteger + Sets the time in seconds to trust an idle connection to skip a validation test.
+
+ Format: int32
+
false
jdbc.auth.admin.rolestring + Identifies the database role that indicates that the database user must get the SQL Administrator role.
+
false
jdbc.auth.enabledboolean + Specifies if the PL/SQL Gateway calls can be authenticated using database users. If the value is true then this feature is enabled. If the value is false, then this feature is disabled. Oracle recommends not to use this feature. This feature used only to facilitate customers migrating from mod_plsql.
+
false
jdbc.cleanup.modestring + Specifies how a pooled JDBC connection and corresponding database session, is released when a request has been processed.
+
false
jdbc.statementTimeoutinteger + Specifies a timeout period on a statement. An abnormally long running query or script, executed by a request, may leave it in a hanging state unless a timeout is set on the statement. Setting a timeout on the statement ensures that all the queries automatically timeout if they are not completed within the specified time period.
+
+ Format: int32
+
false
misc.defaultPagestring + Specifies the default page to display. The Oracle REST Data Services Landing Page.
+
false
misc.pagination.maxRowsinteger + Specifies the maximum number of rows that will be returned from a query when processing a RESTful service and that will be returned from a nested cursor in a result set. Affects all RESTful services generated through a SQL query, regardless of whether the resource is paginated.
+
+ Format: int32
+
false
owa.trace.sqlboolean + If it is true, then it causes a trace of the SQL statements performed by Oracle Web Agent to be echoed to the log.
+
false
plsql.gateway.modeenum + Indicates if the PL/SQL Gateway functionality should be available for a pool or not. Value can be one of disabled, direct, or proxied. If the value is direct, then the pool serves the PL/SQL Gateway requests directly. If the value is proxied, the PLSQL_GATEWAY_CONFIG view is used to determine the user to whom to proxy.
+
+ Enum: disabled, direct, proxied
+
false
procedure.preProcessstring + Specifies the procedure name(s) to execute prior to executing the procedure specified on the URL. Multiple procedure names must be separated by commas.
+
false
procedure.rest.preHookstring + Specifies the function to be invoked prior to dispatching each Oracle REST Data Services based REST Service. The function can perform configuration of the database session, perform additional validation or authorization of the request. If the function returns true, then processing of the request continues. If the function returns false, then processing of the request is aborted and an HTTP 403 Forbidden status is returned.
+
false
procedurePostProcessstring + Specifies the procedure name(s) to execute after executing the procedure specified on the URL. Multiple procedure names must be separated by commas.
+
false
restEnabledSql.activeboolean + Specifies whether the REST-Enabled SQL service is active.
+
false
security.jwks.connection.timeoutinteger + Specifies the maximum amount of time before timing-out when accessing a JWK url.
+
+ Format: int64
+
false
security.jwks.read.timeoutinteger + Specifies the maximum amount of time reading a response from the JWK url before timing-out.
+
+ Format: int64
+
false
security.jwks.refresh.intervalinteger + Specifies the minimum interval between refreshing the JWK cached value.
+
+ Format: int64
+
false
security.jwks.sizeinteger + Specifies the maximum number of bytes read from the JWK url.
+
+ Format: int32
+
false
security.jwt.allowed.ageinteger + Specifies the maximum allowed age of a JWT in seconds, regardless of expired claim. The age of the JWT is taken from the JWT issued at claim.
+
+ Format: int64
+
false
security.jwt.allowed.skewinteger + Specifies the maximum skew the JWT time claims are accepted. This is useful if the clock on the JWT issuer and ORDS differs by a few seconds.
+
+ Format: int64
+
false
security.jwt.profile.enabledboolean + Specifies whether the JWT Profile authentication is available. Supported values:
+
false
security.requestAuthenticationFunctionstring + Specifies an authentication function to determine if the requested procedure in the URL should be allowed or disallowed for processing. The function should return true if the procedure is allowed; otherwise, it should return false. If it returns false, Oracle REST Data Services will return WWW-Authenticate in the response header.
+
false
security.requestValidationFunctionstring + Specifies a validation function to determine if the requested procedure in the URL should be allowed or disallowed for processing. The function should return true if the procedure is allowed; otherwise, return false.
+
+ Default: ords_util.authorize_plsql_gateway
+
false
security.validationFunctionTypeenum + Indicates the type of security.requestValidationFunction: javascript or plsql.
+
+ Enum: plsql, javascript
+
false
soda.defaultLimitstring + When using the SODA REST API, specifies the default number of documents returned for a GET request on a collection when a limit is not specified in the URL. Must be a positive integer, or "unlimited" for no limit.
+
false
soda.maxLimitstring + When using the SODA REST API, specifies the maximum number of documents that will be returned for a GET request on a collection URL, regardless of any limit specified in the URL. Must be a positive integer, or "unlimited" for no limit.
+
false
tnsAdminSecretobject + Specifies the Secret containing the TNS_ADMIN directory Replaces: db.tnsDirectory
+
false
+ + +### OrdsSrvs.spec.poolSettings[index].db.secret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret with the dbUsername and dbPassword values for the connection. + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the password Secret
+
true
passwordKeystring + Specifies the key holding the value of the Secret
+
+ Default: password
+
false
+ + +### OrdsSrvs.spec.poolSettings[index].db.adminUser.secret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret with the dbAdminUser (SYS) and dbAdminPassword values for the database account that ORDS uses for administration operations in the database. replaces: db.adminUser.password + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the password Secret
+
true
passwordKeystring + Specifies the key holding the value of the Secret
+
+ Default: password
+
false
+ + +### OrdsSrvs.spec.poolSettings[index].db.cdb.adminUser.secret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret with the dbCdbAdminUser (SYS) and dbCdbAdminPassword values Specifies the username for the database account that ORDS uses for the Pluggable Database Lifecycle Management. Replaces: db.cdb.adminUser.password + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the password Secret
+
true
passwordKeystring + Specifies the key holding the value of the Secret
+
+ Default: password
+
false
+ + +### OrdsSrvs.spec.poolSettings[index].dbWalletSecret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret containing the wallet archive containing connection details for the pool. Replaces: db.wallet.zip + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the Database Wallet Secret
+
true
walletNamestring + Specifies the Secret key name containing the Wallet
+
true
+ + +### OrdsSrvs.spec.poolSettings[index].tnsAdminSecret +[↩ Parent](#ordssrvsspecpoolsettingsindex) + + + +Specifies the Secret containing the TNS_ADMIN directory Replaces: db.tnsDirectory + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
secretNamestring + Specifies the name of the TNS_ADMIN Secret
+
true
+ + +### OrdsSrvs.status +[↩ Parent](#ordssrvs) + + + +OrdsSrvsStatus defines the observed state of OrdsSrvs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
restartRequiredboolean + Indicates if the resource is out-of-sync with the configuration
+
true
conditions[]object +
+
false
httpPortinteger + Indicates the HTTP port of the resource exposed by the pods
+
+ Format: int32
+
false
httpsPortinteger + Indicates the HTTPS port of the resource exposed by the pods
+
+ Format: int32
+
false
mongoPortinteger + Indicates the MongoAPI port of the resource exposed by the pods (if enabled)
+
+ Format: int32
+
false
ordsVersionstring + Indicates the ORDS version
+
false
statusstring + Indicates the current status of the resource
+
false
workloadTypestring + Indicates the current Workload type of the resource
+
false
+ + +### OrdsSrvs.status.conditions[index] +[↩ Parent](#ordssrvsstatus) + + + +Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // other fields } + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionRequired
lastTransitionTimestring + lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+
+ Format: date-time
+
true
messagestring + message is a human readable message indicating details about the transition. This may be an empty string.
+
true
reasonstring + reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+
true
statusenum + status of the condition, one of True, False, Unknown.
+
+ Enum: True, False, Unknown
+
true
typestring + type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+
true
observedGenerationinteger + observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
+
+ Format: int64
+ Minimum: 0
+
false
diff --git a/docs/ordsservices/autoupgrade.md b/docs/ordsservices/autoupgrade.md new file mode 100644 index 00000000..fddc30b3 --- /dev/null +++ b/docs/ordsservices/autoupgrade.md @@ -0,0 +1,57 @@ +# AutoUpgrade + +Each pool can be configured to automatically install and upgrade the ORDS and/or APEX schemas in the database. +The ORDS and APEX version is based on the ORDS image used for the RestDataServices resource. + +For example, in the below manifest: +* `Pool: pdb1` is configured to automatically install/ugrade both ORDS and APEX to version 24.1.0 +* `Pool: pdb2` will not install or upgrade ORDS/APEX + +As an additional requirement for `Pool: pdb1`, the `spec.poolSettings.db.adminUser` and `spec.poolSettings.db.adminUser.secret` +must be provided. If they are not, the `autoUpgrade` specification is ignored. + +```yaml +apiVersion: database.oracle.com/v1 +kind: OrdsSrvs +metadata: + name: ordspoc-server +spec: + image: container-registry.oracle.com/database/ords:24.1.0 + forceRestart: true + globalSettings: + database.api.enabled: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + poolSettings: + - poolName: pdb1 + autoUpgradeORDS: true + autoUpgradeAPEX: true + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//localhost:1521/PDB1 + db.secret: + secretName: pdb1-ords-auth + db.adminUser: SYS + db.adminUser.secret: + secretName: pdb1-sys-auth-enc + - poolName: pdb2 + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//localhost:1521/PDB2 + db.secret: + secretName: pdb2-ords-auth-enc +``` + +## Minimum Privileges for Admin User + +The `db.adminUser` must have privileges to create users and objects in the database. For Oracle Autonomous Database (ADB), this could be `ADMIN` while for +non-ADBs this could be `SYS AS SYSDBA`. When you do not want to use `ADMIN` or `SYS AS SYSDBA` to install, upgrade, validate and uninstall ORDS a script is provided +to create a new user to be used. + +1. 
Download the equivalent version of ORDS to the image you will be using. +1. Extract the software and locate: `scripts/installer/ords_installer_privileges.sql` +1. Using SQLcl or SQL*Plus, connect to the Oracle PDB with SYSDBA privileges. +1. Execute the following script providing the database user: + ```sql + @/path/to/installer/ords_installer_privileges.sql privuser + exit + ``` diff --git a/docs/ordsservices/examples/adb.md b/docs/ordsservices/examples/adb.md new file mode 100644 index 00000000..ba53aac5 --- /dev/null +++ b/docs/ordsservices/examples/adb.md @@ -0,0 +1,108 @@ +# Example: Autonomous Database without the OraOperator + +This example walks through using the **ORDSSRVS controller** with an Oracle Autonomous Database. + +This assumes that an ADB has already been provisioned and is configured as "Secure Access from Anywhere". +Note that if behind a Proxy, this example will not work as the Wallet will need to be modified to support the proxy configuration. + + +### Cert-Manager and Oracle Database Operator installation + +Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instructions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. 
+ + +### ADB Wallet Secret + +Download the ADB Wallet and create a Secret, replacing `` with the path to the wallet zip file: + +```bash +kubectl create secret generic adb-wallet \ + --from-file= -n ordsnamespace +``` + +### ADB ADMIN Password Secret + +Create a Secret for the ADB ADMIN password, replacing with the real password: + +```bash +echo adb-db-auth-enc +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace +openssl rsautl -encrypt -pubin -inkey public.pem -in adb-db-auth-enc |base64 > e_sidb-db-auth-enc +kubectl create secret generic adb-db-auth-enc --from-file=password=e_sidb-db-auth-enc -n ordsnamespace +rm adb-db-auth-enc e_sidb-db-auth-enc +``` + +### Create RestDataServices Resource + +1. Create a manifest for ORDS. + + As an ADB already maintains ORDS and APEX, `autoUpgradeORDS` and `autoUpgradeAPEX` will be ignored if set. A new DB User for ORDS will be created to avoid conflict with the pre-provisioned one. This user will be + named, `ORDS_PUBLIC_USER_OPER` if `db.username` is either not specified or set to `ORDS_PUBLIC_USER`. + + Replace with the ADB Name and ensure that the `db.wallet.zip.service` is valid for your ADB Workload (e.g. 
_TP or _HIGH, etc.): + + ```bash + echo " + apiVersion: database.oracle.com/v1 + kind: OrdsSrvs + metadata: + name: ords-adb + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + globalSettings: + database.api.enabled: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + poolSettings: + - poolName: adb + db.wallet.zip.service: _TP + dbWalletSecret: + secretName: adb-wallet + walletName: Wallet_.zip + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER_OPER + db.secret: + secretName: adb-db-auth-enc + passwordKey: password + db.adminUser: ADMIN + db.adminUser.secret: + secretName: adb-db-auth-enc + passwordKey: password" | kubectl apply -f - + ``` + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the restdataservices resource until the status is **Healthy**: + ```bash + kubectl get ordssrvs ords-adb -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. If APEX + is being installed for the first time by the Operator, it may remain in the **Preparing** + status for an additional 5 minutes. + +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-adb 8443:8443 +``` + +Direct your browser to: `https://localhost:8443/ords/adb` + +## Conclusion + +This example has a single database pool, named `adb`. It is set to: + +* Not automatically restart when the configuration changes: `forceRestart` is not set. + The pod must be manually resarted for new configurations to be picked-up. +* Automatically install/update ORDS on startup, if required. This occurs due to the database being detected as an ADB. +* Automatically install/update APEX on startup, if required: This occurs due to the database being detected as an ADB. 
+* The ADB `ADMIN` user will be used to connect the ADB to install APEX/ORDS +* Use the ADB Wallet file to connect to the database: `db.wallet.zip.service: adbpoc_tp` and `dbWalletSecret` \ No newline at end of file diff --git a/docs/ordsservices/examples/adb_oraoper.md b/docs/ordsservices/examples/adb_oraoper.md new file mode 100644 index 00000000..b0872fb3 --- /dev/null +++ b/docs/ordsservices/examples/adb_oraoper.md @@ -0,0 +1,176 @@ +# Example: Autonomous Database using the OraOperator + +This example walks through using the **ORDS Controller** with a Containerised Oracle Database created by the **ADB Controller** in the same Kubernetes Cluster. + +When connecting to a mTLS enabled ADB while using the OraOperator to retreive the Wallet as is done in the example, it is currently not supported to have multiple, different databases supported by the single Ordssrvs resource. This is due to a requirement to set the `TNS_ADMIN` parameter at the Pod level ([#97](https://github.com/oracle/oracle-database-operator/issues/97)). + +### Cert-Manager and Oracle Database Operator installation + +Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instractions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. + +### Setup Oracle Cloud Authorisation + +In order for the OraOperator to access the ADB, some pre-requisites are required, as detailed [here](https://github.com/oracle/oracle-database-operator/blob/main/docs/adb/ADB_PREREQUISITES.md). Either establish Instance Principles or create the required ConfigMap/Secret. 
This example uses the later: + +```bash +kubectl create configmap oci-cred \ +--from-literal=tenancy= \ +--from-literal=user= \ +--from-literal=fingerprint= \ +--from-literal=region= + +kubectl create secret generic oci-privatekey \ +--from-file=privatekey= +``` + +### ADB ADMIN Password Secret + +Create a Secret for the ADB Admin password: + +```bash +DB_PWD=$(echo "ORDSpoc_$(date +%H%S%M)") + +kubectl create secret generic adb-oraoper-db-auth \ + --from-literal=adb-oraoper-db-auth=${DB_PWD} +``` + +**NOTE**: When binding to the ADB in a later step, the OraOperator will change the ADB password to what is specified in the Secret. + +### Bind the OraOperator to the ADB + +1. Obtain the OCID of the ADB and set to an environment variable: + + ``` + export ADB_OCID= + ``` + +1. Create a manifest to bind to the ADB. + + ```bash + echo " + apiVersion: database.oracle.com/v1alpha1 + kind: AutonomousDatabase + metadata: + name: adb-oraoper + spec: + hardLink: false + ociConfig: + configMapName: oci-cred + secretName: oci-privatekey + details: + autonomousDatabaseOCID: $ADB_OCID + wallet: + name: adb-oraoper-tns-admin + password: + k8sSecret: + name: adb-oraoper-db-auth" | kubectl apply -f - + ``` + +1. Update the ADMIN Password: + +```bash + kubectl patch adb adb-oraoper --type=merge \ + -p '{"spec":{"details":{"adminPassword":{"k8sSecret":{"name":"adb-oraoper-db-auth"}}}}}' +``` + +1. 
Watch the `adb` resource until the STATE is **AVAILABLE**:
+
+   ```bash
+   kubectl get adb/adb-oraoper -w
+   ```
+
+### Create encrypted password
+
+
+```bash
+echo ${DB_PWD} > adb-db-auth-enc
+openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key
+openssl rsa -in ca.key -outform PEM -pubout -out public.pem
+kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace
+openssl rsautl -encrypt -pubin -inkey public.pem -in adb-db-auth-enc |base64 > e_adb-db-auth-enc
+kubectl create secret generic adb-oraoper-db-auth-enc --from-file=password=e_adb-db-auth-enc -n ordsnamespace
+rm adb-db-auth-enc e_adb-db-auth-enc
+```
+
+
+
+### Create OrdsSrvs Resource
+
+1. Obtain the Service Name from the OraOperator
+
+   ```bash
+   SERVICE_NAME=$(kubectl get adb adb-oraoper -o=jsonpath='{.spec.details.dbName}'_TP)
+   ```
+
+1. Create a manifest for ORDS.
+
+   As an ADB already maintains ORDS and APEX, `autoUpgradeORDS` and `autoUpgradeAPEX` will be ignored if set. A new DB User for ORDS will be created to avoid conflict with the pre-provisioned one. This user will be
+   named, `ORDS_PUBLIC_USER_OPER` if `db.username` is either not specified or set to `ORDS_PUBLIC_USER`.
+ + ```bash + echo " + apiVersion: database.oracle.com/v1 + kind: OrdsSrvs + metadata: + name: ords-adb-oraoper + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + poolSettings: + - poolName: adb-oraoper + db.connectionType: tns + db.tnsAliasName: $SERVICE_NAME + tnsAdminSecret: + secretName: adb-oraoper-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER_OPER + db.secret: + secretName: adb-oraoper-db-auth-enc + passwordKey: adb-oraoper-db-auth-enc + db.adminUser: ADMIN + db.adminUser.secret: + secretName: adb-oraoper-db-auth-enc + passwordKey: adb-oraoper-db-auth-enc" | kubectl apply -f - + ``` + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the ordssrvs resource until the status is **Healthy**: + ```bash + kubectl get ordssrvs ords-adb-oraoper -n ordsnamespace -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. If APEX + is being installed for the first time by the Operator, it may remain in the **Preparing** + status for an additional 5 minutes. + + +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-adb-oraoper 8443:8443 +``` + +Direct your browser to: `https://localhost:8443/ords/adb-oraoper` + +## Conclusion + +This example has a single database pool, named `adb-oraoper`. It is set to: + +* Automatically restart when the configuration changes: `forceRestart: true` +* Automatically install/update ORDS on startup, if required. This occurs due to the database being detected as an ADB. +* Automatically install/update APEX on startup, if required: This occurs due to the database being detected as an ADB. 
+* The ADB `ADMIN` user will be used to connect the ADB to install APEX/ORDS +* Use a TNS connection string to connect to the database: `db.customURL: jdbc:oracle:thin:@//${CONN_STRING}` + The `tnsAdminSecret` Secret `adb-oraoper-tns-admin` was created by the OraOperator +* The `passwordKey` has been specified for both `db.secret` and `db.adminUser.secret` as `adb-oraoper-password` to match the OraOperator specification. +* The ADB `ADMIN` user will be used to connect the ADB to install APEX/ORDS \ No newline at end of file diff --git a/docs/ordsservices/examples/mongo_api.md b/docs/ordsservices/examples/mongo_api.md new file mode 100644 index 00000000..70391fbd --- /dev/null +++ b/docs/ordsservices/examples/mongo_api.md @@ -0,0 +1,160 @@ +# Example: Oracle API for MongoDB Support + +This example walks through using the **ORDSSRVS Controller** with a Containerised Oracle Database to enable MongoDB API Support. + + +### Cert-Manager and Oracle Database Operator installation + +Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instractions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. + + +### Database Access + +This example assumes you have a running, accessible Oracle Database. For demonstration purposes, +the [Containerised Single Instance Database using the OraOperator](sidb_container.md) will be used. + +### Rest Enable a Schema + +In the database, create an ORDS-enabled user. As this example uses the [Containerised Single Instance Database using the OraOperator](sidb_container.md), the following was performed: + + +1. 
Connect to the database:
+
+    ```bash
+    DB_PWD=$(kubectl get secrets sidb-db-auth --template='{{.data.password | base64decode}}')
+    POD_NAME=$(kubectl get pod -l "app=oraoper-sidb" -o custom-columns=NAME:.metadata.name --no-headers)
+    kubectl exec -it ${POD_NAME} -- sqlplus SYSTEM/${DB_PWD}@FREEPDB1
+    ```
+
+1. Create the User:
+    ```sql
+    create user MONGO identified by "My_Password1!";
+    grant soda_app, create session, create table, create view, create sequence, create procedure, create job,
+    unlimited tablespace to MONGO;
+    -- Connect as new user
+    conn MONGO/My_Password1!@FREEPDB1;
+    exec ords.enable_schema;
+    ```
+
+### Create encrypted secrets
+
+```bash
+openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key
+openssl rsa -in ca.key -outform PEM -pubout -out public.pem
+kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace
+echo "${DB_PWD}" > sidb-db-auth-enc
+openssl rsautl -encrypt -pubin -inkey public.pem -in sidb-db-auth-enc |base64 > e_sidb-db-auth-enc
+kubectl create secret generic sidb-db-auth-enc --from-file=password=e_sidb-db-auth-enc -n ordsnamespace
+rm sidb-db-auth-enc e_sidb-db-auth-enc
+
+```
+
+### Create ordssrvs Resource
+
+1. Retrieve the Connection String from the containerised SIDB.
+
+    ```bash
+    CONN_STRING=$(kubectl get singleinstancedatabase oraoper-sidb \
+      -o jsonpath='{.status.pdbConnectString}')
+
+    echo $CONN_STRING
+    ```
+
+1. Create a manifest for ORDS.
+
+   As the DB in the Free image does not contain ORDS (or APEX), the following additional keys are specified for the pool:
+   * `autoUpgradeORDS` - Boolean; when true the ORDS will be installed/upgraded in the database
+   * `db.adminUser` - User with privileges to install, upgrade or uninstall ORDS in the database (SYS).
+ * `db.adminUser.secret` - Secret containing the password for `db.adminUser` (created in the first step) + + The `db.username` will be used as the ORDS schema in the database during the install/upgrade process (ORDS_PUBLIC_USER). + + ```bash + echo " + apiVersion: database.oracle.com/v4 + kind: ordssrvs + metadata: + name: ords-sidb + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + mongo.enabled: true + poolSettings: + - poolName: default + autoUpgradeORDS: true + restEnabledSql.active: true + plsql.gateway.mode: direct + jdbc.MaxConnectionReuseCount: 5000 + jdbc.MaxConnectionReuseTime: 900 + jdbc.SecondsToTrustIdleConnection: 1 + jdbc.InitialLimit: 100 + jdbc.MaxLimit: 100 + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//${CONN_STRING} + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: sidb-db-auth-enc + db.adminUser: SYS + db.adminUser.secret: + secretName: sidb-db-auth-enc" | kubectl apply -f - + ``` + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the restdataservices resource until the status is **Healthy**: + ```bash + kubectl get ordssrvs ords-sidb -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. If APEX + is being installed for the first time by the Operator, it may remain in the **Preparing** + status for an additional 5 minutes. + + You can watch the APEX/ORDS Installation progress by running: + + ```bash + POD_NAME=$(kubectl get pod -l "app.kubernetes.io/instance=ords-sidb" -o custom-columns=NAME:.metadata.name -n ordsnamespace --no-headers) + + kubectl logs ${POD_NAME} -c ords-sidb-init -n ordsnamespace -f + ``` + +### Test + +1. 
Open a port-forward to the MongoAPI service, for example: + ```bash + kubectl port-forward service/ords-sidb 27017:27017 -n ordsnamespace + ``` + +1. Connect to ORDS using the MongoDB shell: + ```bash + mongosh --tlsAllowInvalidCertificates 'mongodb://MONGO:My_Password1!@localhost:27017/MONGO?authMechanism=PLAIN&authSource=$external&tls=true&retryWrites=false&loadBalanced=true' + ``` + +1. Insert some data: + ```txt + db.createCollection('emp'); + db.emp.insertOne({"name":"Blake","job": "Intern","salary":30000}); + db.emp.insertOne({"name":"Miller","job": "Programmer","salary": 70000}); + db.emp.find({"name":"Miller"}); + ``` + +## Conclusion + +This example has a single database pool, named `default`. It is set to: + +* Automatically restart when the configuration changes: `forceRestart: true` +* Automatically install/update ORDS on startup, if required: `autoUpgradeORDS: true` +* Use a basic connection string to connect to the database: `db.customURL: jdbc:oracle:thin:@//${CONN_STRING}` +* The `passwordKey` has been ommitted from both `db.secret` and `db.adminUser.secret` as the password was stored in the default key (`password`) +* The MongoAPI service has been enabled: `mongo.enabled: true` +* The MongoAPI service will default to port: `27017` as the property: `mongo.port` has been left undefined +* A number of JDBC parameters were set at the pool level for achieving high performance: + * `jdbc.MaxConnectionReuseCount: 5000` + * `jdbc.MaxConnectionReuseTime: 900` + * `jdbc.SecondsToTrustIdleConnection: 1` + * `jdbc.InitialLimit: 100` + * `jdbc.MaxLimit: 100` diff --git a/docs/ordsservices/examples/multi_pool.md b/docs/ordsservices/examples/multi_pool.md new file mode 100644 index 00000000..21c5f24d --- /dev/null +++ b/docs/ordsservices/examples/multi_pool.md @@ -0,0 +1,203 @@ +# Example: Multipool, Multidatabase using a TNS Names file + +This example walks through using the **ORDSSRVS Operator** with multiple databases using a TNS Names file. 
+Keep in mind that all pools are running in the same Pod, therefore, changing the configuration of one pool will require +a recycle of all pools. + +### Cert-Manager and Oracle Database Operator installation + +Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instractions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. + +### TNS_ADMIN Secret + +Create a Secret with the contents of the TNS_ADMIN directory. This can be a single `tnsnames.ora` file or additional files such as `sqlnet.ora` or `ldap.ora`. +The example shows using a `$TNS_ADMIN` enviroment variable which points to a directory with valid TNS_ADMIN files. + +To create a secret with all files in the TNS_ADMIN directory: +```bash +kubectl create secret generic multi-tns-admin \ + --from-file=$TNS_ADMIN +``` + +To create a secret with just the tnsnames.ora file: +```bash +kubectl create secret generic multi-tns-admin \ + --from-file=$TNS_ADMIN/tnsnames.ora +``` + +In this example, 4 PDBs will be connected to and the example `tnsnames.ora` file contents are as below: +```text +PDB1=(DESCRIPTION=(ADDRESS_LIST=(LOAD_BALANCE=on)(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.0.1)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=PDB1))) + +PDB2=(DESCRIPTION=(ADDRESS_LIST=(LOAD_BALANCE=on)(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.0.2)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=PDB2))) + +PDB3=(DESCRIPTION=(ADDRESS_LIST=(LOAD_BALANCE=on)(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.0.3)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=PDB3))) + +PDB4=(DESCRIPTION=(ADDRESS_LIST=(LOAD_BALANCE=on)(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.0.4)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=PDB4))) +``` + +### PRIVATE KEY SECRET + +Secrets are encrypted using openssl rsa algorithm. Create public and private key. +Use private key to create a secret. 
+ +```bash +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace +``` + +### ORDS_PUBLIC_USER Secret + +Create a Secret for each of the databases `ORDS_PUBLIC_USER` user. +If multiple databases use the same password, the same secret can be re-used. + +The following secret will be used for PDB1: + +```bash +echo "THIS_IS_A_PASSWORD" > ordspwdfile +openssl rsautl -encrypt -pubin -inkey public.pem -in ordspwdfile |base64 > e_ordspwdfile +kubectl create secret generic pdb1-ords-auth-enc --from-file=password=e_ordspwdfile -n ordsnamespace +rm ordspwdfile e_ordspwdfile +``` + +The following secret will be used for PDB2: + +```bash +echo "THIS_IS_A_PASSWORD" > ordspwdfile +openssl rsautl -encrypt -pubin -inkey public.pem -in ordspwdfile |base64 > e_ordspwdfile +kubectl create secret generic pdb2-ords-auth-enc --from-file=password=e_ordspwdfile -n ordsnamespace +rm ordspwdfile e_ordspwdfile +``` + +The following secret will be used for PDB3 and PDB4: + +```bash +echo "THIS_IS_A_PASSWORD" > ordspwdfile +openssl rsautl -encrypt -pubin -inkey public.pem -in ordspwdfile |base64 > e_ordspwdfile +kubectl create secret generic multi-ords-auth-enc --from-file=password=e_ordspwdfile -n ordsnamespace +rm ordspwdfile e_ordspwdfile +``` + +### Privileged Secret (*Optional) + +If taking advantage of the [AutoUpgrade](../autoupgrade.md) functionality, create a secret for a user with the privileges to modify the ORDS and/or APEX schemas. + +In this example, only PDB1 will be set for [AutoUpgrade](../autoupgrade.md), the other PDBs already have APEX and ORDS installed. 
+ +```bash + + + +echo "THIS_IS_A_PASSWORD" > syspwdfile +openssl rsautl -encrypt -pubin -inkey public.pem -in ordspwdfile |base64 > e_syspwdfile +kubectl create secret generic pdb1-priv-auth-enc --from-file=password=e_syspwdfile -n ordsnamespace +rm syspwdfile e_syspwdfile + +kubectl create secret generic pdb1-priv-auth \ + --from-literal=password=pdb1-battery-staple +``` + +### Create OrdsSrvs Resource + +1. Create a manifest for ORDS. + + ```bash + echo " + apiVersion: database.oracle.com/v1 + kind: OrdsSrvs + metadata: + name: ords-multi-pool + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + poolSettings: + - poolName: pdb1 + autoUpgradeORDS: true + autoUpgradeAPEX: true + db.connectionType: tns + db.tnsAliasName: PDB1 + tnsAdminSecret: + secretName: multi-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: pdb1-ords-auth-enc + db.adminUser: SYS + db.adminUser.secret: + secretName: pdb1-priv-auth-enc + - poolName: pdb2 + db.connectionType: tns + db.tnsAliasName: PDB2 + tnsAdminSecret: + secretName: multi-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: pdb2-ords-auth-enc + - poolName: pdb3 + db.connectionType: tns + db.tnsAliasName: PDB3 + tnsAdminSecret: + secretName: multi-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: multi-ords-auth-enc + - poolName: pdb4 + db.connectionType: tns + db.tnsAliasName: PDB4 + tnsAdminSecret: + secretName: multi-tns-admin + restEnabledSql.active: true + feature.sdw: true + plsql.gateway.mode: proxied + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: 
multi-ords-auth-enc" | kubectl apply -f - + ``` + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the ordssrvs resource until the status is **Healthy**: + ```bash + kubectl get OrdsSrvs ords-multi-pool -n ordsnamespace -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. As APEX + is being installed for the first time by the Operator into PDB1, it will remain in the **Preparing** + status for an additional 5-10 minutes. + +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-multi-pool -n ordsnamespace 8443:8443 +``` + +1. For PDB1, direct your browser to: `https://localhost:8443/ords/pdb1` +1. For PDB2, direct your browser to: `https://localhost:8443/ords/pdb2` +1. For PDB3, direct your browser to: `https://localhost:8443/ords/pdb3` +1. For PDB4, direct your browser to: `https://localhost:8443/ords/pdb4` + +## Conclusion + +This example has multiple pools, named `pdb1`, `pdb2`, `pdb3`, and `pdb4`. 
+ +* They all share the same `tnsAdminSecret` to connect using thier individual `db.tnsAliasName` +* They will all automatically restart when the configuration changes: `forceRestart: true` +* Only the `pdb1` pool will automatically install/update ORDS on startup, if required: `autoUpgradeORDS: true` +* Only the `pdb1` pool will automatically install/update APEX on startup, if required: `autoUpgradeAPEX: true` +* The `passwordKey` has been ommitted from both `db.secret` and `db.adminUser.secret` as the password was stored in the default key (`password`) diff --git a/docs/ordsservices/examples/sidb_container.md b/docs/ordsservices/examples/sidb_container.md new file mode 100644 index 00000000..804ecca4 --- /dev/null +++ b/docs/ordsservices/examples/sidb_container.md @@ -0,0 +1,154 @@ +# Example: Containerised Single Instance Database using the OraOperator + +This example walks through using the **ORDSSRVS Controller** with a Containerised Oracle Database created by the **SIDB Controller** in the same Kubernetes Cluster. + +### Cert-Manager and Oracle Database Operator installation + +Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instractions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. + + +### Deploy a Containerised Oracle Database + +1. Create a Secret for the Database password: + + ```bash + DB_PWD=$(echo "ORDSpoc_$(date +%H%S%M)") + + kubectl create secret generic sidb-db-auth \ + --from-literal=password=${DB_PWD} + ``` +1. Create a manifest for the containerised Oracle Database. + + The POC uses an Oracle Free Image, but other versions may be subsituted; review the OraOperator Documentation for details on the manifests. 
+ + ```bash + echo " + apiVersion: database.oracle.com/v1alpha1 + kind: SingleInstanceDatabase + metadata: + name: oraoper-sidb + spec: + replicas: 1 + image: + pullFrom: container-registry.oracle.com/database/free:23.4.0.0 + prebuiltDB: true + sid: FREE + edition: free + adminPassword: + secretName: sidb-db-auth + secretKey: password + pdbName: FREEPDB1" | kubectl apply -f - + ``` + latest container-registry.oracle.com/database/free version, **23.4.0.0**, valid as of **2-May-2024** + +1. Watch the `singleinstancedatabases` resource until the database status is **Healthy**: + + ```bash + kubectl get singleinstancedatabases/oraoper-sidb -w + ``` + + **NOTE**: If this is the first time pulling the free database image, it may take up to 15 minutes for the database to become available. + +### Create encryped secret + +```bash + +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace + +echo "${DB_PWD}" > sidb-db-auth +openssl rsautl -encrypt -pubin -inkey public.pem -in sidb-db-auth |base64 > e_sidb-db-auth +kubectl create secret generic sidb-db-auth-enc --from-file=password=e_sidb-db-auth -n ordsnamespace +rm sidb-db-auth e_sidb-db-auth + + +``` + + +### Create RestDataServices Resource + +1. Retrieve the Connection String from the containerised SIDB. + + ```bash + CONN_STRING=$(kubectl get singleinstancedatabase oraoper-sidb \ + -o jsonpath='{.status.pdbConnectString}') + + echo $CONN_STRING + ``` + +1. Create a manifest for ORDS. 
+ + As the DB in the Free image does not contain ORDS (or APEX), the following additional keys are specified for the pool: + * `autoUpgradeORDS` - Boolean; when true the ORDS will be installed/upgraded in the database + * `autoUpgradeAPEX` - Boolean; when true the APEX will be installed/upgraded in the database + * `db.adminUser` - User with privileges to install, upgrade or uninstall ORDS in the database (SYS). + * `db.adminUser.secret` - Secret containing the password for `db.adminUser` (created in the first step) + + The `db.username` will be used as the ORDS schema in the database during the install/upgrade process (ORDS_PUBLIC_USER). + + ```bash + echo " + apiVersion: database.oracle.com/v1 + kind: OrdsSrvs + metadata: + name: ords-sidb + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + globalSettings: + database.api.enabled: true + poolSettings: + - poolName: default + autoUpgradeORDS: true + autoUpgradeAPEX: true + restEnabledSql.active: true + plsql.gateway.mode: direct + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//${CONN_STRING} + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: sidb-db-auth-enc + db.adminUser: SYS + db.adminUser.secret: + secretName: sidb-db-auth-enc" | kubectl apply -f - + ``` + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the ordssrvs resource until the status is **Healthy**: + ```bash + kubectl get ordssrvs ords-sidb -n ordsnamespace -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. If APEX + is being installed for the first time by the Operator, it may remain in the **Preparing** + status for an additional 5 minutes. 
+ + You can watch the APEX/ORDS Installation progress by running: + + ```bash + POD_NAME=$(kubectl get pod -l "app.kubernetes.io/instance=ords-sidb" -n ordsnamespace -o custom-columns=NAME:.metadata.name --no-headers) + + kubectl logs ${POD_NAME} -c ords-sidb-init -n ordsnamespace -f + ``` + +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-sidb -n ordsnamespace 8443:8443 +``` + +Direct your browser to: `https://localhost:8443/ords` + +## Conclusion + +This example has a single database pool, named `default`. It is set to: + +* Automatically restart when the configuration changes: `forceRestart: true` +* Automatically install/update ORDS on startup, if required: `autoUpgradeORDS: true` +* Automatically install/update APEX on startup, if required: `autoUpgradeAPEX: true` +* Use a basic connection string to connect to the database: `db.customURL: jdbc:oracle:thin:@//${CONN_STRING}` +* The `passwordKey` has been ommitted from both `db.secret` and `db.adminUser.secret` as the password was stored in the default key (`password`) diff --git a/docs/ordsservices/usecase01/create_mong_schema.sql b/docs/ordsservices/usecase01/create_mong_schema.sql new file mode 100644 index 00000000..a00ee441 --- /dev/null +++ b/docs/ordsservices/usecase01/create_mong_schema.sql @@ -0,0 +1,9 @@ +drop user MONGO cascade; +set echo on +set head on +create user MONGO identified by "My_Password1!"; +grant soda_app, create session, create table, create view, create sequence, create procedure, create job, +unlimited tablespace to MONGO; +conn MONGO/My_Password1!@158.180.233.248:30001/FREEPDB1 +exec ords.enable_schema; +exit; diff --git a/docs/ordsservices/usecase01/help b/docs/ordsservices/usecase01/help new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/docs/ordsservices/usecase01/help @@ -0,0 +1 @@ + diff --git a/docs/ordsservices/usecase01/makefile b/docs/ordsservices/usecase01/makefile new file mode 100644 index 
00000000..76b47210 --- /dev/null +++ b/docs/ordsservices/usecase01/makefile @@ -0,0 +1,778 @@ +# +# Copyright (c) 2006, 2024, Oracle and/or its affiliates. +# +# +# NAME +# makefile: +# This makefile helps to set up multipool and sidb cases +# edit the following variables with your system information +# and execute make help to list the list of avilable targets +# + +export PDB1=pdb1 +export PDB2=pdb2 +export TNS1=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=$(PDB1)))) +export TNS2=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=$(PDB2)))) +export SIDB_PASSWORD=....write password here .... +export PDB1_PWD=.....write password here.... +export PDB2_PWD=.....write password.... +export ORDS_MULTI_POOL_PWD=....write password here.... 
+export USER_CONTAINER_REGISTRY=username@oracle.com <--Your OCR account +export OPRNAMESPACE=oracle-database-operator-system +export ORDSNAMESPACE=ordsnamespace + + +# +# DESCRIPTION +# Main makefile - see target table +# +# | Target | Description | +# +-------------+--------------------------------------------------------+ +# | step0a | create_ordsnamespace.yaml | +# +-------------+--------------------------------------------------------+ +# | step1a | setup certmaneger | +# +-------------+--------------------------------------------------------+ +# | step2a | setup operator oracle-database-operator.yaml | +# +-------------+--------------------------------------------------------+ +# | step3a | default scoped deployment default-ns-role-binding.yaml | +# +-------------+--------------------------------------------------------+ +# | step4a | node - persistent volume - storage class for the db | +# +-------------+--------------------------------------------------------+ +# | step5a | setup secrets | +# +----------------------------------------------------------------------+ +# | step6a | setup secrets for OCR | +# +----------------------------------------------------------------------+ +# | step7a | setup sidb | +# +----------------------------------------------------------------------+ +# | step8a | ⭐Setup REST SERVER ⭐ | +# +-------------+--------------------------------------------------------+ +# +# step[1-7]a are required to start mongodb API rest server +# +# step[9-11] test mongo API +# +-------------+--------------------------------------------------------+ +# | step9 | configure a mongo db user on sidb | +# +-------------+--------------------------------------------------------+ +# | step10 | ⭐Setup REST SERVER FOR MONGO API ⭐ | +# +-------------+--------------------------------------------------------+ +# | step11 | Test Mongo API | +# +-------------+--------------------------------------------------------+ +# +# step[12- ] test multi tns configuration +# 
+-------------+--------------------------------------------------------+ +# | step12 | create tns secret | +# +-------------+--------------------------------------------------------+ +# | step13 | create passwords secret | +# +-------------+--------------------------------------------------------+ +# | step14 | ⭐SetupMulti Rest Server ⭐ | +# +-------------+--------------------------------------------------------+ +# + + + +export WATCHLIST=$(OPRNAMESPACE),$(ORDSNAMESPACE) +export CREATE_SINGLEINSTANCE=create_singleinstance_db.yaml +export CERTMANAGER=https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml +export SIDB_SECRET=sidb-db-auth +export ORDS_SECRET=ords-db-auth +export MULTI_ORDS_AUTH_SECRET=multi-ords-auth-enc +export PDB1_PRIV_AUTH_SECRET=pdb1-priv-auth-enc +export PDB2_PRIV_AUTH_SECRET=pdb2-priv-auth-enc + + +export SIDB_IMAGE=container-registry.oracle.com/database/free:23.4.0.0 +export ORDS_IMAGE=container-registry.oracle.com/database/ords:24.1.0 +export ORDS_IMAGE.1=container-registry.oracle.com/database/ords:24.1.1 +export SECRET_CONTAINER_REGISTRY=oracle-container-registry-secret +export ORACLE_CONTAINER_REGISTRY=container-registry.oracle.com +export REST_SERVER_NAME=ords-sidb +export REST_SERVER_NAME_MONGO=ords-sidb-mongo +export MONGOSH=mongosh-2.3.1-linux-x64 +export KIND=OrdsSrvs + +export TNSNAMES=./tnsnames.ora +export TNSADMIN=`pwd`/tnsadmin +export PRVKEY=ca.key +export PUBKEY=public.pem + +## CMD SECTION## +export KUBECTL=/usr/local/go/bin/kubectl +export DIFF=/usr/bin/diff +export MAKE=/usr/bin/make +export CURL=/usr/bin/curl +export TAR=/usr/bin/tar +export OPENSSL=/usr/bin/openssl + +## YAML AND OTHER FILES ## +export CREATE_ORDSNAMESPACE=create_$(ORDSNAMESPACE).yaml +export DEFAULT_NAMESPACE_SCOPE=default-ns-role-binding.yaml +export RST_NAMESPACE_SCOPE=ords-ns-role-binding.yaml +export ORACLE_OPERATOR_YAML=../../../oracle-database-operator.yaml +export NODE_RBAC=node-rbac.yaml +export 
STORAGE_CLASS_RBAC=storage-class-rbac.yaml +export PERSISTENT_VOLUME_RBAC=persistent-volume-rbac.yaml +export SIDB_CREATION=sidb_create.yaml +export SECRET_CONTAINER_REGISTRY_SCRIPT=create_registry_secret.sh +export REST_SERVER_CREATION=rest_server_creation.yaml +export REST_SERVER_CREATION_MONGO=rest_server_creation_mongo.yaml +export MULTISRV_MANIFEST=create_multisrv.yaml +export MONGOORADBUSER=MONGO + + +MAKEFILE=./makefile +.ONESHELL: + +define manpage +@printf "\n" +@printf "\033[7m%s\033[0m \033[7m%s\033[0m \033[7m%s\033[0m\n" "TARGET " "DESCRIPTION " "YAML FILE " +@printf "%s %s %s\n" "---------" " --------------------------------------------------" "--------------------------------------" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step0/a/d setup new namespace" " " "$(CREATE_ORDSNAMESPACE)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step1/a/d setup certmaneger " " " "$(CERTMANAGER)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step2/a/d setup operator" " " "$(shell basename $(ORACLE_OPERATOR_YAML))" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step3/a/d default scoped deployment" " " "$(DEFAULT_NAMESPACE_SCOPE)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" " ords scoped deployment" " " "$(RST_NAMESPACE_SCOPE)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step4/a/d node rbac" " " "$(NODE_RBAC)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" " storage class " " " "$(STORAGE_CLASS_RBAC)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" " persistent volume " " " "$(PERSISTENT_VOLUME_RBAC)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step5/a/d setup db secret" " " "n/a" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step6/a/d setup registry secret" " " "$(SECRET_CONTAINER_REGISTRY_SCRIPT)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step7/a/d setup sidb " " " "$(SIDB_CREATION)" +@printf "================================================\n" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step8/a/d setup RestServer " " " "$(REST_SERVER_CREATION)" +@printf "%-40s %+20s \033[1m 
%s\033[0m\n" "step9/-/- configure " " " "Mongo ora db user:$(MONGOORADBUSER)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step10/a/d setup RestServer Mongo " " " "$(REST_SERVER_CREATION_MONGO)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step11/-/- test mongodb API " " " "----" +@printf "================================================\n" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step12/a/d create secret for tnsadmin " " " "$(TNSADMIN)" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step13/a/d create secrets for adminusers" " " "---" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "step14/a/d/e setup Multi Ords services " " " "---" +@printf "================================================\n" +@printf "%-40s %+20s \033[1m %s\033[0m\n" "diagordsinit" "" "🔬dump initpod logs" + + + +@printf "================================================\n" +@printf " a=apply d=delete ⚡e=generate error ⚡\n" +@printf "\n" +endef + + +help:man +man: + $(call manpage) + +define namespace +cat< $(CREATE_ORDSNAMESPACE) +#apiVersion: v1 +#kind: Namespace +#metadata: +# labels: +# control-plane: controller-manager +# name: $(2) +EOF +$(KUBECTL) $(1) -f $(CREATE_ORDSNAMESPACE) +$(KUBECTL) get namespace +endef + +step0: + $(call namespace,$(ACTION),$(ORDSNAMESPACE)) +step0a: + $(MAKE) -f $(MAKEFILE) step0 ACTION=apply +step0d: + $(MAKE) -f $(MAKEFILE) step0 ACTION=delete + +step1: + $(KUBECTL) $(ACTION) -f $(CERTMANAGER) +step1a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step1 + $(KUBECTL) get pod -n cert-manager +step1d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step1 + + +define setwatchnamespace +@echo "Setting watch namespace list: $(WATCHLIST)" +sed 's/value: ""/value: "$(WATCHLIST)"/g' $(ORACLE_OPERATOR_YAML) > `basename $(ORACLE_OPERATOR_YAML)` +$(KUBECTL) $(1) -f `basename $(ORACLE_OPERATOR_YAML)` +$(DIFF) $(ORACLE_OPERATOR_YAML) `basename $(ORACLE_OPERATOR_YAML)` +$(KUBECTL) get pods -n $(OPRNAMESPACE) +endef + +step2: + $(call setwatchnamespace,$(ACTION)) +step2a: + $(MAKE) -f $(MAKEFILE) 
ACTION=apply step2 +step2d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step2 + + +define namespacescpe +cat<$(RST_NAMESPACE_SCOPE) +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: RoleBinding +#metadata: +# name: $(ORDSNAMESPACE)-rolebinding +# namespace: $(ORDSNAMESPACE) +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: oracle-database-operator-system +EOF + +cat< $(DEFAULT_NAMESPACE_SCOPE) +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: RoleBinding +#metadata: +# name: oracle-database-operator-oracle-database-operator-manager-rolebinding +# namespace: default +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: oracle-database-operator-system +EOF + +$(KUBECTL) $(1) -f $(RST_NAMESPACE_SCOPE) +$(KUBECTL) $(1) -f $(DEFAULT_NAMESPACE_SCOPE) +$(KUBECTL) get RoleBinding -n $(ORDSNAMESPACE) + +endef + +step3: + $(call namespacescpe,$(ACTION)) + +step3a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step3 + +step3d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step3 + + +export NODE_RBAC=node-rbac.yaml +export STORAGE_CLASS_RBAC=storage-class-rbac.yaml +export PERSISTENT_VOLUME_RBAC=persistent-volume-rbac.yaml + + +define persistenvolume + +cat<$(NODE_RBAC) +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRole +#metadata: +# name: oracle-database-operator-manager-role-node +#rules: +#- apiGroups: +# - "" +# resources: +# - nodes +# verbs: +# - list +# - watch +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRoleBinding +#metadata: +# name: oracle-database-operator-manager-role-node-cluster-role-binding +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role-node +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: 
oracle-database-operator-system +EOF + +cat<$(STORAGE_CLASS_RBAC) +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRole +#metadata: +# name: oracle-database-operator-manager-role-storage-class +#rules: +#- apiGroups: +# - storage.k8s.io +# resources: +# - storageclasses +# verbs: +# - get +# - list +# - watch +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRoleBinding +#metadata: +# name: oracle-database-operator-manager-role-storage-class-cluster-role-binding +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role-storage-class +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: oracle-database-operator-system +#--- +EOF + +cat<$(PERSISTENT_VOLUME_RBAC) +# +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRole +#metadata: +# name: oracle-database-operator-manager-role-persistent-volume +#rules: +#- apiGroups: +# - "" +# resources: +# - persistentvolumes +# verbs: +# - get +# - list +# - watch +#--- +#apiVersion: rbac.authorization.k8s.io/v1 +#kind: ClusterRoleBinding +#metadata: +# name: oracle-database-operator-manager-role-persistent-volume-cluster-role-binding +#roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: oracle-database-operator-manager-role-persistent-volume +#subjects: +#- kind: ServiceAccount +# name: default +# namespace: oracle-database-operator-system +#--- +# +EOF + +$(KUBECTL) $(1) -f $(NODE_RBAC) +$(KUBECTL) $(1) -f $(STORAGE_CLASS_RBAC) +$(KUBECTL) $(1) -f $(PERSISTENT_VOLUME_RBAC) + +endef + +step4: + $(call persistenvolume,$(ACTION)) +step4a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step4 +step4d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step4 + + +export SYSPWDFILE1=syspwdfile +export ORDPWDFILE=ordspwdfile +export SIDB_PASSWORD_FILE=sidbpasswordfile + +export PRVKEY=ca.key +export PUBKEY=public.pem +export OPENSSL=/usr/bin/openssl + +step5a: + echo $(SIDB_PASSWORD) > $(SIDB_PASSWORD_FILE) + - $(KUBECTL) 
delete secret pubkey -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret prvkey -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret $(SIDB_SECRET) -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret $(ORDS_SECRET) -n ${ORDSNAMESPACE} + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ${PRVKEY} + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + $(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(ORDSNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(ORDSNAMESPACE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SIDB_PASSWORD_FILE) |base64 > e_$(SIDB_PASSWORD_FILE) + $(KUBECTL) create secret generic $(SIDB_SECRET) --from-literal=password=$(SIDB_PASSWORD) -n $(OPRNAMESPACE) + $(KUBECTL) create secret generic $(ORDS_SECRET) --from-file=password=e_$(SIDB_PASSWORD_FILE) -n $(ORDSNAMESPACE) + $(RM) e_$(SIDB_PASSWORD_FILE) $(SIDB_PASSWORD_FILE) + +step5d: + - $(KUBECTL) delete secret pubkey -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret prvkey -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret $(SIDB_SECRET) -n ${ORDSNAMESPACE} + - $(KUBECTL) delete secret $(ORDS_SECRET) -n ${ORDSNAMESPACE} + + +define registry_secret +printf "#!/bin/bash \n" >$(SECRET_CONTAINER_REGISTRY_SCRIPT) +printf "echo enter password for $(USER_CONTAINER_REGISTRY)@$(ORACLE_CONTAINER_REGISTRY) \n" >$(SECRET_CONTAINER_REGISTRY_SCRIPT) +printf "read -s scpwd \n" >>$(SECRET_CONTAINER_REGISTRY_SCRIPT) +printf "$(KUBECTL) create secret docker-registry $(SECRET_CONTAINER_REGISTRY) --docker-server=$(ORACLE_CONTAINER_REGISTRY) --docker-username=$(USER_CONTAINER_REGISTRY) --docker-password=\u0024scpwd --docker-email=$(USER_CONTAINER_REGISTRY) -n $(OPRNAMESPACE) \n" >>$(SECRET_CONTAINER_REGISTRY_SCRIPT) +printf "$(KUBECTL) create secret docker-registry $(SECRET_CONTAINER_REGISTRY) --docker-server=$(ORACLE_CONTAINER_REGISTRY) --docker-username=$(USER_CONTAINER_REGISTRY) 
--docker-password=\u0024scpwd --docker-email=$(USER_CONTAINER_REGISTRY) -n $(ORDSNAMESPACE) \n" >>$(SECRET_CONTAINER_REGISTRY_SCRIPT) + +bash $(SECRET_CONTAINER_REGISTRY_SCRIPT) +endef + +step6a: + $(call registry_secret) + +step6d: + $(KUBECTL) delete secret $(SECRET_CONTAINER_REGISTRY) -n $(OPRNAMESPACE) + + +define sidb + +cat<$(SIDB_CREATION) +#apiVersion: database.oracle.com/v4 +#kind: SingleInstanceDatabase +#metadata: +# name: oraoper-sidb +# namespace: $(OPRNAMESPACE) +#spec: +# replicas: 1 +# image: +# pullFrom: $(SIDB_IMAGE) +# pullSecrets: $(SECRET_CONTAINER_REGISTRY) +# prebuiltDB: true +# sid: FREE +# listenerPort: 30001 +# edition: free +# adminPassword: +# secretName: $(SIDB_SECRET) +# secretKey: password +# pdbName: FREEPDB1 +EOF + +$(KUBECTL) $(1) -f $(SIDB_CREATION) +endef + +step7: + $(call sidb,$(ACTION)) +step7a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step7 +step7d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step7 + + +define restservice +cat<$(REST_SERVER_CREATION) +#apiVersion: database.oracle.com/v4 +#kind: $(KIND) +#metadata: +# name: $(REST_SERVER_NAME) +# namespace: $(ORDSNAMESPACE) +#spec: +# image: $(ORDS_IMAGE) +# forceRestart: true +# encPrivKey: +# secretName: prvkey +# passwordKey: privateKey +# globalSettings: +# database.api.enabled: true +# poolSettings: +# - poolName: default +# autoUpgradeORDS: true +# autoUpgradeAPEX: true +# restEnabledSql.active: true +# plsql.gateway.mode: direct +# db.connectionType: customurl +# db.customURL: jdbc:oracle:thin:@//$(2) +# db.username: ORDS_PUBLIC_USER +# db.secret: +# secretName: $(ORDS_SECRET) +# db.adminUser: SYS +# db.adminUser.secret: +# secretName: $(ORDS_SECRET) +# +EOF + +[ $(3) -eq 1 ] && { +sed -i 's/SYS/SYT/g' $(REST_SERVER_CREATION) +echo -e "TYPO" +} + +$(KUBECTL) $(1) -f $(REST_SERVER_CREATION) +endef + +step8: + $(eval TNS_ALIAS_CDB := $(shell $(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) --template '{{range .items}}{{.status.clusterConnectString}}{{"\n"}}{{end}}')) + 
$(eval TNS_ALIAS_PDB := $(shell $(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) --template '{{range .items}}{{.status.pdbConnectString}}{{"\n"}}{{end}}')) + echo $(TNS_ALIAS) + $(call restservice,$(ACTION),$(TNS_ALIAS_PDB),$(ERR)) +step8a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step8 ERR=0 +step8d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step8 ERR=0 +step8e: + $(MAKE) -f $(MAKEFILE) ACTION=apply step8 ERR=1 + +reloadop: + echo "RESTARTING OPERATOR" + $(eval OP1 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1 )) + $(eval OP2 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1|cut -d ' ' -f 1 )) + $(eval OP3 := $(shell $(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1 )) + $(KUBECTL) get pod $(OP1) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP2) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + $(KUBECTL) get pod $(OP3) -n $(OPRNAMESPACE) -o yaml | kubectl replace --force -f - + +loginords: + @$(eval RESTPOD := $(shell $(KUBECTL) get pods --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' -n $(ORDSNAMESPACE))) + $(KUBECTL) logs $(RESTPOD) -n $(ORDSNAMESPACE) + $(KUBECTL) exec $(RESTPOD) -n $(ORDSNAMESPACE) -it -- /bin/bash + +logindb: + $(eval PODPDB := $(shell $(KUBECTL) get pods --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' -n $(OPRNAMESPACE)|grep -v oracle-database-operator)) + echo $(PODPDB) + $(KUBECTL) exec $(PODPDB) -n $(OPRNAMESPACE) -it -- bash + + +report: + $(KUBECTL) get pods -n $(OPRNAMESPACE) + $(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) + $(KUBECTL) get pods -n $(ORDSNAMESPACE) + $(KUBECTL) get $(KIND) -n $(ORDSNAMESPACE) + + +someattributes: + kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.connectString}}{{"\n"}}{{end}}' + 
kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.tcpsConnectString}}{{"\n"}}{{end}}' + kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.clusterConnectString}}{{"\n"}}{{end}}' + kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.tcpsPdbConnectString}}{{"\n"}}{{end}}' + kubectl get SingleInstanceDatabase -n oracle-database-operator-system --template '{{range .items}}{{.status.pdbConnectString}}{{"\n"}}{{end}}' + + + + + +dump: + @$(eval TMPSP := $(shell date "+%y%m%d%H%M%S" )) + @$(eval DIAGFILE := ./opdmp.$(TMPSP)) + @>$(DIAGFILE) + @echo "OPERATOR DUMP" >> $(DIAGFILE) + @echo "~~~~~~~~~~~~~" >> $(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|head -2|tail -1 | cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + $(KUBECTL) logs pod/`$(KUBECTL) get pods -n $(OPRNAMESPACE)|grep oracle-database-operator-controller|tail -1|cut -d ' ' -f 1` -n $(OPRNAMESPACE) >>$(DIAGFILE) + + + +step9: sql +define dbenv +$(1): DB_PWD=`$(KUBECTL) get secrets sidb-db-auth -n $(OPRNAMESPACE) --template='{{.data.password | base64decode}}'` +$(1): POD_NAME=`$(KUBECTL) get pod -l "app=oraoper-sidb" -o custom-columns=NAME:.metadata.name -n $(OPRNAMESPACE) --no-headers` +$(1): TNSSTR=`$(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) --template '{{range .items}}{{.status.pdbConnectString}}{{"\n"}}{{end}}'` +endef + +$(eval $(call dbenv,sqlplus sql)) +#$(eval $(call dbenv,sqlplus)) + +define copyfile +cat <create_mong_schema.sql +drop user MONGO cascade; +set echo on +set head on +create user MONGO identified by "My_Password1!"; +grant soda_app, create session, create table, create view, create 
sequence, create procedure, create job, +unlimited tablespace to MONGO; +conn MONGO/My_Password1!@${TNSSTR} +exec ords.enable_schema; +exit; +EOF +$(KUBECTL) cp ./create_mong_schema.sql $(POD_NAME):/home/oracle -n $(OPRNAMESPACE) +endef + +sql: + echo $(TNSSTR) + $(call copyfile) + @$(KUBECTL) exec -it $(POD_NAME) -n $(OPRNAMESPACE) -- sqlplus SYSTEM/$(DB_PWD)@$(TNSSTR) @/home/oracle/create_mong_schema.sql + +sqlplus: + @$(KUBECTL) exec -it $(POD_NAME) -n $(OPRNAMESPACE) -- sqlplus SYSTEM/$(DB_PWD)@$(TNSSTR) + + +define restservicemongo +cat <$(REST_SERVER_CREATION_MONGO) +#apiVersion: database.oracle.com/v4 +#kind: $(KIND) +#metadata: +# name: $(REST_SERVER_NAME_MONGO) +# namespace: $(ORDSNAMESPACE) +#spec: +# image: $(ORDS_IMAGE.1) +# forceRestart: true +# globalSettings: +# database.api.enabled: true +# mongo.enabled: true +# poolSettings: +# - poolName: default +# autoUpgradeORDS: true +# restEnabledSql.active: true +# plsql.gateway.mode: direct +# jdbc.MaxConnectionReuseCount: 5000 +# jdbc.MaxConnectionReuseTime: 900 +# jdbc.SecondsToTrustIdleConnection: 1 +# jdbc.InitialLimit: 100 +# jdbc.MaxLimit: 100 +# db.connectionType: customurl +# db.customURL: jdbc:oracle:thin:@//${2} +# db.username: ORDS_PUBLIC_USER +# db.secret: +# secretName: ords-db-auth +# db.adminUser: SYS +# db.adminUser.secret: +# secretName: ords-db-auth +EOF +$(KUBECTL) $(1) -f $(REST_SERVER_CREATION_MONGO) +endef + + + +step10: + $(eval TNS_ALIAS_PDB := $(shell $(KUBECTL) get SingleInstanceDatabase -n $(OPRNAMESPACE) --template '{{range .items}}{{.status.pdbConnectString}}{{"\n"}}{{end}}')) + echo $(TNS_ALIAS_PDB) + $(call restservicemongo,$(ACTION),$(TNS_ALIAS_PDB)) +step10a: + $(MAKE) -f $(MAKEFILE) ACTION=apply step10 +step10d: + $(MAKE) -f $(MAKEFILE) ACTION=delete step10 + + +step11: + echo "Open a port-forward to the MongoAPI service" + @nohup $(KUBECTL) port-forward service/$(REST_SERVER_NAME_MONGO) 27017:27017 -n $(ORDSNAMESPACE) 1>portfwd.log 2>&1 & + @echo "DOWNLOADING MONGOSH" + 
@$(CURL) https://downloads.mongodb.com/compass/$(MONGOSH).tgz --output mongosh-2.3.1-linux-x64.tgz + @echo "UNTAR FILE" + @$(TAR) -zxvf $(MONGOSH).tgz + ./$(MONGOSH)/bin/mongosh --tlsAllowInvalidCertificates 'mongodb://MONGO:My_Password1!@localhost:27017/MONGO?authMechanism=PLAIN&authSource=$external&tls=true&retryWrites=false&loadBalanced=true' + @echo "STOP PORT FRWD" + @kill `ps -ef | grep kubectl | grep 27017 | grep -v grep | awk '{printf $$2}'` + $(RM) $(MONGOSH).tgz + $(RM) -rf ./$(MONGOSH) + + +define buildtns +echo "Building tnsnames.ora" +cat <$(TNSADMIN)/$(TNSNAMES) +$(PDB1)=$(TNS1) + +$(PDB2)=$(TNS2) +EOF +$(KUBECTL) create secret generic multi-tns-admin -n $(ORDSNAMESPACE) --from-file=$(TNSADMIN)/ +endef + +step12a: + $(call buildtns) + +step12d: + $(KUBECTL) delete secret multi-tns-admin -n $(ORDSNAMESPACE) + +export SYSPWDFILE1=syspwdfile1 +export SYSPWDFILE2=syspwdfile2 +export ORDPWDFILE=ordspwdfile + + +step13a: + echo $(PDB1_PWD) > $(SYSPWDFILE1) + echo $(PDB2_PWD) > $(SYSPWDFILE2) + echo $(ORDS_MULTI_POOL_PWD) > $(ORDPWDFILE) + $(OPENSSL) genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ${PRVKEY} + $(OPENSSL) rsa -in $(PRVKEY) -outform PEM -pubout -out $(PUBKEY) + #$(KUBECTL) create secret generic pubkey --from-file=publicKey=$(PUBKEY) -n $(ORDSNAMESPACE) + $(KUBECTL) create secret generic prvkey --from-file=privateKey=$(PRVKEY) -n $(ORDSNAMESPACE) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SYSPWDFILE1) |base64 > e_$(SYSPWDFILE1) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(SYSPWDFILE2) |base64 > e_$(SYSPWDFILE2) + $(OPENSSL) rsautl -encrypt -pubin -inkey $(PUBKEY) -in $(ORDPWDFILE) |base64 > e_$(ORDPWDFILE) + $(KUBECTL) create secret generic $(PDB1_PRIV_AUTH_SECRET) --from-file=password=e_$(SYSPWDFILE1) -n $(ORDSNAMESPACE) + $(KUBECTL) create secret generic $(PDB2_PRIV_AUTH_SECRET) --from-file=password=e_$(SYSPWDFILE2) -n $(ORDSNAMESPACE) + $(KUBECTL) create secret generic 
$(MULTI_ORDS_AUTH_SECRET) --from-file=password=e_$(ORDPWDFILE) -n $(ORDSNAMESPACE) + $(RM) $(SYSPWDFILE1) $(SYSPWDFILE2) $(ORDPWDFILE) e_$(SYSPWDFILE1) e_$(SYSPWDFILE2) e_$(ORDPWDFILE) + +step13d: + - $(KUBECTL) delete secret pubkey -n $(ORDSNAMESPACE) + - $(KUBECTL) delete secret prvkey -n $(ORDSNAMESPACE) + - $(KUBECTL) delete secret $(PDB1_PRIV_AUTH_SECRET) -n $(ORDSNAMESPACE) + - $(KUBECTL) delete secret $(PDB2_PRIV_AUTH_SECRET) -n $(ORDSNAMESPACE) + - $(KUBECTL) delete secret $(MULTI_ORDS_AUTH_SECRET) -n $(ORDSNAMESPACE) + +define multisrv +cat <$(MULTISRV_MANIFEST) +#apiVersion: database.oracle.com/v4 +#kind: $(KIND) +#metadata: +# name: ords-multi-pool +# namespace: $(ORDSNAMESPACE) +#spec: +# image: container-registry.oracle.com/database/ords:24.1.1 +# forceRestart: true +# encPrivKey: +# secretName: prvkey +# passwordKey: privateKey +# globalSettings: +# database.api.enabled: true +# poolSettings: +# - poolName: pdb1 +# autoUpgradeAPEX: false +# autoUpgradeORDS: false +# db.connectionType: tns +# db.tnsAliasName: pdb1 +# tnsAdminSecret: +# secretName: multi-tns-admin +# restEnabledSql.active: true +# feature.sdw: true +# plsql.gateway.mode: proxied +# db.username: ORDS_PUBLIC_USER +# db.secret: +# secretName: $(MULTI_ORDS_AUTH_SECRET) +# db.adminUser: SYS +# db.adminUser.secret: +# secretName: $(PDB1_PRIV_AUTH_SECRET) +# - poolName: pdb2 +# autoUpgradeAPEX: false +# autoUpgradeORDS: false +# db.connectionType: tns +# db.tnsAliasName: PDB2 +# tnsAdminSecret: +# secretName: multi-tns-admin +# restEnabledSql.active: true +# feature.sdw: true +# plsql.gateway.mode: proxied +# db.username: ORDS_PUBLIC_USER +# db.secret: +# secretName: $(MULTI_ORDS_AUTH_SECRET) +# db.adminUser: SYS +# db.adminUser.secret: +# secretName: $(PDB1_PRIV_AUTH_SECRET) + +# +EOF +[ $(2) -eq 1 ] && { +sed -i 's/SYS/SYT/g' $(MULTISRV_MANIFEST) +echo -e "TYPO" +} + +$(KUBECTL) $(1) -f $(MULTISRV_MANIFEST) +endef + +step14: + $(call multisrv,$(ACTION),$(ERR)) +step14a: + $(MAKE) -f 
$(MAKEFILE) ACTION=apply ERR=0 step14 +step14d: + $(MAKE) -f $(MAKEFILE) ACTION=delete ERR=0 step14 +step14e: + $(MAKE) -f $(MAKEFILE) ACTION=apply ERR=1 step14 + + +define dumpinit +#!/bin/bash +NAMESPACE=${1} +KUBECTL=/usr/bin/kubectl +for _pod in `${KUBECTL} get pods --no-headers -o custom-columns=":metadata.name" --no-headers -n $${NAMESPACE}` +do + for _podinit in `${KUBECTL} get pod $${_pod} -n $${NAMESPACE} -o="custom-columns=INIT-CONTAINERS:.spec.initContainers[*].name" --no-headers` + do + echo "DUMPINIT $${_pod}:$${_podinit}" + ${KUBECTL} logs -f --since=0 $${_pod} -n $${NAMESPACE} -c $${_podinit} + done +done +endef + +diagordsinit: + $(call dumpinit ,$(ORDSNAMESPACE)) + diff --git a/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora b/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora new file mode 100644 index 00000000..1b1b8943 --- /dev/null +++ b/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora @@ -0,0 +1,3 @@ +pdb1=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=pdb1))) + +pdb2=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=pdb2))) diff --git a/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora.offline b/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora.offline new file mode 100644 index 00000000..b58a8a66 --- /dev/null +++ b/docs/ordsservices/usecase01/tnsadmin/tnsnames.ora.offline @@ -0,0 +1 @@ 
+pdb1=(DESCRIPTION=(CONNECT_TIMEOUT=90)(RETRY_COUNT=30)(RETRY_DELAY=10)(TRANSPORT_CONNECT_TIMEOUT=70)(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan12.testrac.com)(PORT=1521)(IP=V4_ONLY))(LOAD_BALLANCE=ON)(ADDRESS=(PROTOCOL=TCP)(HOST=scan34.testrac.com)(PORT=1521)(IP=V4_ONLY))(CONNECT_DATA=(SERVER=DEDICATED)(SERVICE_NAME=TESTORDS))) diff --git a/docs/sharding/README.md b/docs/sharding/README.md index 239dd767..661e2546 100644 --- a/docs/sharding/README.md +++ b/docs/sharding/README.md @@ -24,16 +24,15 @@ The Oracle Sharding database controller provides end-to-end automation of Oracle ## Using Oracle Database Operator Sharding Controller -Following sections provide the details for deploying Oracle Globally Distributed Database using Oracle Database Operator Sharding Controller with different use cases: +Following sections provide the details for deploying Oracle Globally Distributed Database (Oracle Sharded Database) using Oracle Database Operator Sharding Controller with different use cases: * [Prerequisites for running Oracle Sharding Database Controller](#prerequisites-for-running-oracle-sharding-database-controller) * [Oracle Database 23ai Free](#oracle-database-23ai-free) -* [Provisioning Oracle Globally Distributed Database Topology System-Managed Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-oracle-globally-distributed-database-topology-with-system-managed-sharding-in-a-cloud-based-kubernetes-cluster) -* [Provisioning Oracle Globally Distributed Database Topology User Defined Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-oracle-globally-distributed-database-topology-with-user-defined-sharding-in-a-cloud-based-kubernetes-cluster) -* [Provisioning Oracle Globally Distributed Database System-Managed Sharding with Raft replication enabled in a Cloud-Based Kubernetes Cluster](#provisioning-oracle-globally-distributed-database-topology-with-system-managed-sharding-and-raft-replication-enabled-in-a-cloud-based-kubernetes-cluster) +* 
[Provisioning Sharding Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-sharding-topology-with-system-managed-sharding-in-a-cloud-based-kubernetes-cluster) +* [Provisioning Sharding Topology with User Defined Sharding in a Cloud-Based Kubernetes Cluster](#provisioning-sharding-topology-with-user-defined-sharding-in-a-cloud-based-kubernetes-cluster) +* [Provisioning System-Managed Sharding Topology with Raft replication enabled in a Cloud-Based Kubernetes Cluster](#provisioning-system-managed-sharding-topology-with-raft-replication-enabled-in-a-cloud-based-kubernetes-cluster) * [Connecting to Shard Databases](#connecting-to-shard-databases) * [Debugging and Troubleshooting](#debugging-and-troubleshooting) -* [Known Issues](#known-issues) **Note** Before proceeding to the next section, you must complete the instructions given in each section, based on your enviornment, before proceeding to next section. @@ -92,6 +91,8 @@ You can either download the images and push them to your Docker Images Repositor **Note:** In case you want to use the `Oracle Database 23ai Free` Image for Database and GSM, refer to section [Oracle Database 23ai Free](#oracle-database-23ai-free) for more details. +### 4. Create a namespace for the Oracle DB Sharding Setup + ### 4. Create a namespace for the Oracle Globally Distributed Database Setup Create a Kubernetes namespace named `shns`. All the resources belonging to the Oracle Globally Distributed Database Topology Setup will be provisioned in this namespace named `shns`. 
For example: @@ -108,7 +109,7 @@ You can either download the images and push them to your Docker Images Repositor Create a Kubernetes secret named `db-user-pass-rsa` using these steps: [Create Kubernetes Secret](./provisioning/create_kubernetes_secret_for_db_user.md) -After you have the above prerequisites completed, you can proceed to the next section for your environment to provision the Oracle Globally Distributed Database Topology. +After you have the above prerequisites completed, you can proceed to the next section for your environment to provision the Oracle Database Sharding Topology. ### 6. Provisioning a Persistent Volume having an Oracle Database Gold Image @@ -118,6 +119,71 @@ In case of an `OCI OKE` cluster, you can use this Persistent Volume during provi You can refer [here](./provisioning/provisioning_persistent_volume_having_db_gold_image.md) for the steps involved. +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. So, this step will not be needed if you are deploying Oracle Sharded Database using Oracle 23ai Free Database and GSM Images. + +## Oracle Database 23ai Free + +Please refer to [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) documentation for more details. + +If you want to use Oracle Database 23ai Free Image for Database and GSM for deployment of the Sharded Database using Sharding Controller in Oracle Database Kubernetes Operator, you need to consider the below points: + +* To deploy using the FREE Database and GSM Image, you will need to add the additional parameter `dbEdition: "free"` to the .yaml file. +* Refer to [Sample Sharded Database Deployment using Oracle 23ai FREE Database and GSM Images](./provisioning/free/sharding_provisioning_with_free_images.md) for an example. 
+* For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. +* Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +* Total number of chunks for FREE Database defaults to `12` if `CATALOG_CHUNKS` parameter is not specified. This default value is determined considering limitation of 12 GB of user data on disk for oracle free database. + + +## Provisioning Sharding Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster + +Deploy Oracle Database Sharding Topology with `System-Managed Sharding` on your Cloud based Kubernetes cluster. + +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: + +[1. Provisioning Oracle Sharded Database with System-Managed Sharding without Database Gold Image](./provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md) +[2. Provisioning Oracle Sharded Database with System-Managed Sharding with number of chunks specified](./provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md) +[3. Provisioning Oracle Sharded Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md) +[4. Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[5. 
Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[6. Provisioning Oracle Sharded Database with System-Managed Sharding and send Notification using OCI Notification Service](./provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md) +[7. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_out_add_shards.md) +[8. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md) + + +## Provisioning Sharding Topology with User Defined Sharding in a Cloud-Based Kubernetes Cluster + +Deploy Oracle Database Sharding Topology with `User Defined Sharding` on your Cloud based Kubernetes cluster. + +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: + +[1. Provisioning Oracle Sharded Database with User Defined Sharding without Database Gold Image](./provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md) +[2. Provisioning Oracle Sharded Database with User Defined Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md) +[3. Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) +[4. 
Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) +[5. Provisioning Oracle Sharded Database with User Defined Sharding and send Notification using OCI Notification Service](./provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md) +[6. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with User Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md) +[7. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with User Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md) + + +## Provisioning System-Managed Sharding Topology with Raft replication enabled in a Cloud-Based Kubernetes Cluster + +Deploy Oracle Database Sharding Topology with `System-Managed Sharding with SNR RAFT enabled` on your Cloud based Kubernetes cluster. + +**NOTE: SNR RAFT Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** + +In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: + +[1. Provisioning System-Managed Sharding Topology with Raft replication enabled without Database Gold Image](./provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md) +[2. Provisioning System-Managed Sharding Topology with Raft replication enabled with number of chunks specified](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md) +[3. 
Provisioning System-Managed Sharding Topology with Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md)
+[4. Provisioning System-Managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md)
+[5. Provisioning System-Managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md)
+[6. Provisioning System-Managed Sharding Topology with Raft replication enabled and send Notification using OCI Notification Service](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md)
+[7. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md)
+[8. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md)
+
+You can refer [here](./provisioning/provisioning_persistent_volume_having_db_gold_image.md) for the steps involved.
+
 **NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. So, this step will not be needed if you are deploying Oracle Globally Distributed Database using Oracle 23ai Free Database and GSM Images. 
## Oracle Database 23ai Free diff --git a/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md b/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md index 99620f04..744f972c 100644 --- a/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md +++ b/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md @@ -1,14 +1,14 @@ # Create kubernetes secret for db user -Below are the steps to create an encrypted file with a password for the DB User: +Use the following steps to create an encrypted file with a password for the DB User: -- Create a text file which is having the password which you want to use for the DB user. +- Create a text file that has the password that you want to use for the DB user. - Create an RSA key pair using `openssl`. -- Encrypt the text file with password using `openssl` with the RSA key pair generated earlier. +- Encrypt the text file with a password, using `openssl` with the RSA key pair generated earlier. - Remove the initial text file. -- Create the Kubernetes secret named `db-user-pass-rsa` using the encrypted file. +- Create the Kubernetes Secret named `db-user-pass-rsa` using the encrypted file. 
-Please refer the below example for the above steps: +To understand how to create your own file, use the following example: ```sh # Create a directory for files for the secret: @@ -43,4 +43,4 @@ kubectl delete secret $SECRET_NAME -n $NAMESPACE # Create the Kubernetes secret in namespace "NAMESPACE" kubectl create secret generic $SECRET_NAME --from-file=$PWDFILE_ENC --from-file=${PRIVKEY} -n $NAMESPACE -``` \ No newline at end of file +``` diff --git a/docs/sharding/provisioning/database_connection.md b/docs/sharding/provisioning/database_connection.md index 5671520b..58a54930 100644 --- a/docs/sharding/provisioning/database_connection.md +++ b/docs/sharding/provisioning/database_connection.md @@ -1,10 +1,10 @@ # Database Connectivity -The Oracle Globally Distributed Database Topology deployed by Sharding Controller in Oracle Database Operator has an external IP available for each of the container. +The Oracle Database Sharding Topology deployed by Sharding Controller in Oracle Database Operator has an external IP available for each of the containers. ## Below is an example setup with connection details -Check the details of the Oracle Globally Distributed Database Topology provisioned using Sharding Controller: +Check the details of the Sharding Topology provisioned by using the Sharding Controller: ```sh $ kubectl get all -n shns @@ -35,7 +35,7 @@ statefulset.apps/shard1 1/1 10d statefulset.apps/shard2 1/1 10d ``` -After you have the external IP address, you can use the services shown below to make the database connection using the above example: +After you have the external IP address, you can use the services shown below to make the database connection. Using the preceding example, that file should look as follows: 1. **Direct connection to the CATALOG Database**: Connect to the service `catalogpdb` on catalog container external IP `xx.xx.xx.116` on port `1521` 2. 
**Direct connection to the shard Database SHARD1**: Connect to the service `shard1pdb` on catalog container external IP `xx.xx.xx.187` on port `1521` diff --git a/docs/sharding/provisioning/debugging.md b/docs/sharding/provisioning/debugging.md index 330cfc0e..372f104d 100644 --- a/docs/sharding/provisioning/debugging.md +++ b/docs/sharding/provisioning/debugging.md @@ -1,50 +1,50 @@ # Debugging and Troubleshooting -When the Oracle Globally Distributed Database Topology is provisioned using the Oracle Database Kubernetes Operator, the debugging of an issue with the deployment depends on at which stage the issue has been seen. +When the Oracle Database Sharding Topology is provisioned using the Oracle Database Kubernetes Operator, debugging an issue with the deployment depends on which stage the issue is seen. -Below are the possible cases and the steps to debug such an issue: +The following sections provide possible issue cases, and the steps to debug such an issue: ## Failure during the provisioning of Kubernetes Pods -In case the failure occurs during the provisioning, we need to check the status of the Kubernetes Pod which has failed to deployed. +If the failure occurs during the provisioning, then check the status of the Kubernetes Pod that has failed to be deployed. -Use the below command to check the logs of the Pod which has a failure. For example, for failure in case of Pod `pod/catalog-0`, use below command: +To check the logs of the Pod that has a failure, use the command that follows. In this example, we are checking for failure in provisioning Pod `pod/catalog-0`: ```sh kubectl logs -f pod/catalog-0 -n shns ``` -In case the Pod has failed to provision due to an issue with the Docker Image, you will see the error `Error: ErrImagePull` in above logs. +If the Pod has failed to provision due to an issue with the Docker Image, then you will see the error `Error: ErrImagePull` in the logs displayed by the command. 
-If the Pod has not yet got initialized, use the below command to find the reason for it:
+If the Pod has not yet been initialized, then use the following command to find the reason for it:
 
 ```sh
 kubectl describe pod/catalog-0 -n shns
 ```
 
-In case the failure is related to the Cloud Infrastructure, you will need to troubleshooting that using the documentation from the cloud provider.
+If the failure is related to the Cloud Infrastructure, then troubleshoot the infrastructure using the documentation from the Cloud infrastructure provider.
 
 ## Failure in the provisioning of the Oracle Globally Distributed Database
 
-In case the failure occures after the Kubernetes Pods are created but during the execution of the scripts to create the shard databases, catalog database or the GSM, you will need to trobleshoot that at the individual Pod level.
+If the failure occurs after the Kubernetes Pods are created but during the execution of the scripts to create the shard databases, catalog database or the GSM, then you must troubleshoot that failure at the individual Pod level.
 
-Initially, check the logs of the Kubernetes Pod using the command like below (change the name of the Pod with the actual Pod)
+Initially, check the logs of the Kubernetes Pod using the following command (change the name of the Pod in the command with the actual Pod):
 
 ```sh
 kubectl logs -f pod/catalog-0 -n shns
 ```
 
-To check the logs at the GSM or at the Database level or at the host level, switch to the corresponding Kubernetes container using the command like below:
+To check the logs at the GSM level, the database level, or at the host level, switch to the corresponding Kubernetes container. For example:
 
 ```sh
 kubectl exec -it catalog-0 -n shns /bin/bash
 ```
 
-Now, you can troubleshooting the corresponding component using the alert log or the trace files etc just like a normal Oracle Globally Distributed Database Deployment. 
Please refer to [Oracle Globally Distributed Database Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/19/shard/sharding-troubleshooting.html#GUID-629262E5-7910-4690-A726-A565C59BA73E) for this purpose.
+When you are in the correct Kubernetes container, you can troubleshoot the corresponding component using the alert log, the trace files, and so on, just as you would with a normal Sharding Database Deployment. For more information, see: [Oracle Database Sharding Documentation](https://docs.oracle.com/en/database/oracle/oracle-database/19/shard/sharding-troubleshooting.html#GUID-629262E5-7910-4690-A726-A565C59BA73E)
 
 ## Debugging using Database Events
 
-* You can enable database events as part of the Oracle Globally Distributed Database Deployment
-* This can be enabled using the `envVars`
-* One example of enabling Database Events is [sharding_provisioning_with_db_events.md](./debugging/sharding_provisioning_with_db_events.md)
\ No newline at end of file
+* You can enable database events as part of the Sharded Database Deployment
+* Enable events using `envVars`
+* One example of enabling Database Events is [sharding_provisioning_with_db_events.md](./debugging/sharding_provisioning_with_db_events.md)
diff --git a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md
index 763f2dc8..fa73920f 100644
--- a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md
+++ b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.md
@@ -1,17 +1,17 @@
-# Example of provisioning Oracle Globally Distributed Database along with DB Events set at Database Level
+# Example of provisioning Oracle Sharded Database along with DB Events set at Database Level
 
 **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database 
Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. This example sets a Database Event at the Database Level for Catalog and Shard Databases. -The Oracle Globally Distributed Database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Globally Distributed Database topology with System-Managed Sharding is deployed using Oracle Sharding controller. +The sharded database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. **NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
-This example uses `sharding_provisioning_with_db_events.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: +This example uses `sharding_provisioning_with_db_events.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Event: `10798 trace name context forever, level 7` set along with `GWM_TRACE level 263` diff --git a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml index 7d136d58..40ad600a 100644 --- a/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml +++ b/docs/sharding/provisioning/debugging/sharding_provisioning_with_db_events.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -66,4 +66,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md index f6b53462..61641312 100644 --- a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md +++ b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md @@ -1,25 +1,25 @@ -# Example of provisioning Oracle Globally Distributed Database with Oracle 23ai FREE Database and GSM Images +# Example of provisioning Oracle Sharded Database with Oracle 23ai FREE Database and GSM Images **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. This example uses the Oracle 23ai FREE Database and GSM Images. -The Oracle Globally Distributed Database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. +The sharded database in this example is deployed with System-Managed Sharding type. In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed Sharding is deployed using Oracle Sharding controller. 
**NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. This example uses `sharding_provisioning_with_free_images.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` To get the Oracle 23ai FREE Database and GSM Images: * The Oracle 23ai FREE RDBMS Image used is `container-registry.oracle.com/database/free:latest`. Check [Oracle Database Free Get Started](https://www.oracle.com/database/free/get-started/?source=v0-DBFree-ChatCTA-j2032-20240709) for details. - * Use the Oracle 23ai FREE GSM Image used is `container-registry.oracle.com/database/gsm:latest`. * To pull the above image from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. + * Use the Oracle 23ai FREE GSM Binaries `LINUX.X64_234000_gsm.zip` as listed on page [Oracle Database Free Get Started](https://www.oracle.com/database/free/get-started/?source=v0-DBFree-ChatCTA-j2032-20240709) and prepare the GSM Container Image following [Oracle Global Data Services Image](https://github.com/oracle/db-sharding/tree/master/docker-based-sharding-deployment/dockerfiles) * You need to change `dbImage` and `gsmImage` tag with the images you want to use in your enviornment in file `sharding_provisioning_with_free_images.yaml`. 
diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml index 53e4191f..954ede63 100644 --- a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml +++ b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -41,8 +41,8 @@ spec: storageClass: oci dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest - gsmImagePullSecret: ocr-reg-cred + gsmImage: + gsmImagePullSecret: dbEdition: "free" isExternalSvc: False isDeleteOraPvc: True @@ -55,4 +55,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index 2698bce9..ba72be25 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -1,14 +1,14 @@ -# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs) +# Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs) **NOTE: RAFT Replication Feature 
is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this test case, you provision the Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. +In this test case, you provision the System managed Sharding Topology with Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. -Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup across ADs. +Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup across ADs. NOTE: @@ -21,10 +21,10 @@ NOTE: ```sh kubectl get pv -n shns ``` -2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `snr_ssharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding Database controller with: +2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. 
This example uses `snr_ssharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. @@ -33,12 +33,12 @@ NOTE: NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. -**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone_across_ads.yaml`. - * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. + * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) Use the file: [snr_ssharding_shard_prov_clone_across_ads.yaml](./snr_ssharding_shard_prov_clone_across_ads.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index b43a9158..cf4240f7 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -1,14 +1,14 @@ -# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD) +# Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD) **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this test case, you provision the Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. 
+In this test case, you provision the System managed Sharding Topology with Raft replication enabled while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. -Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup. +Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup. **NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. @@ -18,10 +18,10 @@ Choosing this option takes substantially less time during the Oracle Globally Di kubectl get pv -n shns ``` -2. This example uses `snr_ssharding_shard_prov_clone.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: +2. This example uses `snr_ssharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` @@ -29,14 +29,14 @@ Choosing this option takes substantially less time during the Oracle Globally Di NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. 
-**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. - In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + Use the file: [snr_ssharding_shard_prov_clone.yaml](./snr_ssharding_shard_prov_clone.yaml) for this use case as below: 1. 
Deploy the `snr_ssharding_shard_prov_clone.yaml` file: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md index d6171986..44972090 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md @@ -1,19 +1,19 @@ -# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled with number of chunks specified +# Provisioning System-Managed Sharding Topology with Raft replication enabled with number of chunks specified **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Globally Distributed Database topology with System-Managed sharding and RAFT Replication enabled is deployed using Oracle Sharding controller. +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed with RAFT Replication enabled is deployed using Oracle Sharding controller. **NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
-By default, the System-Managed with RAFT Replication deploys the Oracle Globally Distributed Database with 360 chunks per Shard Database (because there are 3 chunks created for each replication unit). In this example, the Oracle Globally Distributed Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. +By default, the System-Managed with RAFT Replication deploys the Sharded Database with 360 chunks per Shard Database (because there are 3 chunks created for each replication unit). In this example, the Sharded Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. -This example uses `snr_ssharding_shard_prov_chunks.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: +This example uses `snr_ssharding_shard_prov_chunks.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Total number of chunks as `120` specified by variable `CATALOG_CHUNKS` (it will be 120 chunks per shard) * Namespace: `shns` diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md index c432310d..9cfd6afb 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md @@ -1,15 +1,15 @@ -# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods +# 
Provisioning System-Managed Sharding Topology with Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Globally Distributed Database Topology with System-managed sharding and RAFT Replication is deployed using Oracle Sharding controller. +In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with System-Managed with RAFT Replication is deployed using Oracle Sharding controller. 
This example uses `snr_ssharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Tags `memory` and `cpu` to control the Memory and CPU of the PODs diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md index d45b2911..d4cb11de 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md @@ -1,15 +1,15 @@ -# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled and send Notification using OCI Notification Service +# Provisioning System managed Sharding Topology with Raft replication enabled and send Notification using OCI Notification Service **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Globally Distributed Database Topology provisioned using the Oracle Database sharding controller. 
+This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. -This example uses `snr_ssharding_shard_prov_send_notification.yaml` to provision an Oracle Globally Distributed Database Topology using Oracle Sharding controller with: +This example uses `snr_ssharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier. @@ -64,14 +64,14 @@ To do this: kubectl describe secret my-secret -n shns ``` -**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. - In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_send_notification.yaml`. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. + Use the file: [snr_ssharding_shard_prov_send_notification.yaml](./snr_ssharding_shard_prov_send_notification.yaml) for this use case as below: 1. Deploy the `snr_ssharding_shard_prov_send_notification.yaml` file: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md index c8568f1e..892741a5 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md @@ -1,17 +1,17 @@ -# Provisioning Oracle Globally Distributed Database Topology with System-managed sharding and Raft replication enabled without Database Gold Image +# Provisioning System-Managed Sharding Topology with Raft replication enabled without Database Gold Image **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
-In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Globally Distributed Database topology with System-Managed sharding and RAFT Replication enabled is deployed using Oracle Sharding controller. +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with System-Managed with RAFT Replication enabled is deployed using Oracle Sharding controller. **NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. -This example uses `snr_ssharding_shard_prov.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: +This example uses `snr_ssharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * `RAFT Replication` enabled diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md index dc026a7c..fe3157ec 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md @@ -1,4 +1,4 @@ -# Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding and RAFT reolication enabled +# Scale In 
- Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** @@ -8,10 +8,10 @@ This use case demonstrates how to delete an existing Shard from an existing Orac **NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard. -In this use case, the existing Oracle Globally Distributed Database is having: +In this use case, the existing database Sharding is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Five Shard Database Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` +* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` * One Catalog Pod: `catalog` * Namespace: `shns` * `RAFT Replication` enabled diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md index 962bf64c..03423e72 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md @@ -1,15 +1,15 @@ -# Scale Out - Add Shards to an existing Oracle Globally Distributed Database Topology provisioned earlier with System-Managed Sharding and RAFT replication enabled +# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled **NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
-This use case demonstrates adding a new shard to an existing Oracle Globally Distributed Database topology with System-Managed sharding with RAFT Replication enabled provisioned earlier using Oracle Database Sharding controller. +This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with System-Managed with RAFT Replication enabled provisioned earlier using Oracle Database Sharding controller. -In this use case, the existing Oracle Globally Distributed Database topology is having: +In this use case, the existing Oracle Database sharding topology is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * `RAFT Replication` enabled @@ -18,7 +18,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) - * If the existing Oracle Globally Distributed Database Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. 
+ * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml index efe3abec..aabd8470 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -55,4 +55,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml index a79eafdc..def7e73a 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -58,4 +58,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml index 218fda0a..8f17331e 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -80,4 +80,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml index 4eb3954a..d0c1c6e0 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -88,4 +88,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml index 145ef616..0859b089 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -66,4 +66,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml index ea0c05a5..123b3ae1 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -65,4 +65,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml index 5c15c724..0cfccf9a 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -86,4 +86,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml index 50c85443..345e9c09 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -82,4 +82,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index e015f916..e457b7eb 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Globally Distributed Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) +# Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -22,7 +22,7 @@ NOTE: 2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `ssharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. 
@@ -31,11 +31,11 @@ NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. -**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. - * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_clone_across_ads.yaml`. + * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your environment in file `ssharding_shard_prov_clone_across_ads.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index fb16e3cd..cb01fa0d 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Globally Distributed Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) +# Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -19,12 +19,12 @@ Choosing this option takes substantially less time during the Oracle Globally Di 2. 
This example uses `ssharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` -**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md index b824ab03..0c6ea8fe 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Globally Distributed Database with System-Managed Sharding with number of chunks specified +# Provisioning Oracle Sharded Database with System-Managed Sharding with number of chunks specified **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
@@ -6,12 +6,12 @@ In this use case, the database is created automatically using DBCA during the pr **NOTE:** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. -By default, the System-Managed Sharding deploys the Oracle Globally Distributed Database with 120 chunks per Shard Database. For example, if we have three shards in the Oracle Globally Distributed Database, it will be total of 360 chunks. In this example, the Oracle Globally Distributed Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. +By default, the System-Managed Sharding deploys the Sharded Database with 120 chunks per Shard Database. If, for example, we have three shards in the Sharded Database, it will be a total of 360 chunks. In this example, the Sharded Database will be deployed with non-default number of chunks specified using parameter `CATALOG_CHUNKS`. 
This example uses `ssharding_shard_prov_chunks.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Total number of chunks as `120` specified by variable `CATALOG_CHUNKS` (it will be 40 chunks per shard) * Namespace: `shns` diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md index 153a40a9..c4f45a48 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Globally Distributed Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods +# Provisioning Oracle Sharded Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
@@ -7,7 +7,7 @@ In this use case, there are additional tags used to control resources such as CP This example uses `ssharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Tags `memory` and `cpu` to control the Memory and CPU of the PODs diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md index 7ec24439..1a6a1ee3 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md @@ -1,13 +1,13 @@ -# Provisioning Oracle Globally Distributed Database with System-Managed Sharding and send Notification using OCI Notification Service +# Provisioning Oracle Sharded Database with System-Managed Sharding and send Notification using OCI Notification Service **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Globally Distributed Database topology provisioned using the Oracle Database sharding controller. 
+This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. This example uses `ssharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier. @@ -67,7 +67,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. -**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
Use the file: [ssharding_shard_prov_send_notification.yaml](./ssharding_shard_prov_send_notification.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md index b262407f..b223d1af 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Globally Distributed Database with System-Managed Sharding without Database Gold Image +# Provisioning Oracle Sharded Database with System-Managed Sharding without Database Gold Image **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
@@ -9,7 +9,7 @@ In this use case, the database is created automatically using DBCA during the pr This example uses `ssharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md index 8cac01cd..bca34253 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md @@ -1,4 +1,4 @@ -# Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding +# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -6,10 +6,10 @@ This use case demonstrates how to delete an existing Shard from an existing Orac **NOTE** The deletion of a shard is done after verifying the Chunks have been moved out of that shard. 
-In this use case, the existing Oracle Globally Distributed Database is having:
+In this use case, the existing Oracle Database sharding topology is having:
 * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2`
-* Five Shard Database Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5`
+* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5`
 * One Catalog Pod: `catalog`
 * Namespace: `shns`
@@ -21,7 +21,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services
 NOTE: Use tag `isDelete: enable` to delete the shard you want.
-This use case deletes the shard `shard4` from the above Oracle Globally Distributed Database Topology.
+This use case deletes the shard `shard4` from the above Sharding Topology.
 Use the file: [ssharding_shard_prov_delshard.yaml](./ssharding_shard_prov_delshard.yaml) for this use case as below:
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md
index 091a01a0..1db8e6c3 100644
--- a/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md
+++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md
@@ -1,13 +1,13 @@
-# Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with System-Managed Sharding
+# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding
 **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller.
 This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with System-Managed Sharding provisioned earlier using Oracle Database Sharding controller.
-In this use case, the existing Oracle Globally Distributed Database topology is having: +In this use case, the existing Oracle Database sharding topology is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` @@ -15,9 +15,9 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) - * If the existing Oracle Globally Distributed Database Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. -This use case adds two new shards `shard4`,`shard5` to above Oracle Globally Distributed Database Topology. +This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. 
Use the file: [ssharding_shard_prov_extshard.yaml](./ssharding_shard_prov_extshard.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml index 7d4e16ec..5adbd2ce 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -54,4 +54,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml new file mode 100644 index 00000000..5c135229 --- /dev/null +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml @@ -0,0 +1,59 @@ +# +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
+# +--- +apiVersion: database.oracle.com/v4 +kind: ShardingDatabase +metadata: + name: shardingdatabase-sample + namespace: shns +spec: + shard: + - name: shard1 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard2 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + - name: shard3 + storageSizeInGb: 50 + imagePullPolicy: "Always" + shardGroup: shardgroup1 + shardRegion: primary + catalog: + - name: catalog + storageSizeInGb: 50 + imagePullPolicy: "Always" + envVars: + - name: "CATALOG_CHUNKS" + value: "120" + gsm: + - name: gsm1 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: primary + - name: gsm2 + imagePullPolicy: "Always" + storageSizeInGb: 50 + region: standby + storageClass: oci + dbImage: container-registry.oracle.com/database/enterprise:latest + dbImagePullSecret: ocr-reg-cred + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred + isExternalSvc: False + isDeleteOraPvc: True + dbSecret: + name: db-user-pass-rsa + pwdFileName: pwdfile.enc + keyFileName: key.pem + gsmService: + - name: oltp_rw_svc + role: primary + - name: oltp_ro_svc + role: primary diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml index 9a690626..f5816a87 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -79,4 +79,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml index b7dd1397..8fee0526 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -87,4 +87,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml index 75caca31..3902ceef 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -65,4 +65,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml index d25dc901..a11833e0 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -64,4 +64,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml index 793a3e4d..3f092b89 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -85,4 +85,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml index 96217b74..0ca6ec6f 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -81,5 +81,4 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index dda8a350..9b2905e8 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -1,12 +1,12 @@ -# Provisioning Oracle Globally Distributed Database with User-Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) +# Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs) **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database 
Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this test case, you provision the Oracle Globally Distributed Database topology with User-Defined Sharding while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. +In this test case, you provision the Oracle Database sharding topology with User Defined Sharding while provisioning the Catalog and Shard Databases by cloning from an existing Oracle Database Gold Image created earlier. This use case applies when you want to provision the database Pods on a Kubernetes Node in any availability domain (AD), which can also be different from the availability domain (AD) of the Block Volume that has the Oracle Database Gold Image provisioned earlier. -Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup across ADs. +Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup across ADs. NOTE: @@ -19,15 +19,15 @@ NOTE: ```sh kubectl get pv -n shns ``` -2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. This example uses `udsharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: +2. Create a Block Volume Backup for this Block Volume, and use the OCID of the Block Volume Backup in the next step. 
This example uses `udsharding_shard_prov_clone_across_ads.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume which had the Gold Image. * OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` -* User-Defined Sharding is specified using `shardingType: USER` +* User Defined Sharding is specified using `shardingType: USER` NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned across multiple Availability Domains by cloning the database. @@ -37,7 +37,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) -**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
Use the file: [udsharding_shard_prov_clone_across_ads.yaml](./udsharding_shard_prov_clone_across_ads.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index 34fa2867..a4669667 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -1,4 +1,4 @@ -# Provisioning Oracle Globally Distributed Database with User-Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) +# Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD) **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -6,7 +6,7 @@ In this case, the database is created automatically by cloning from an existing This use case applies when you are cloning from a Block Volume, and you can clone _only_ in the same availability domain (AD). The result is that the cloned shard database PODs can be created _only_ in the same AD where the Gold Image Block Volume is present. -Choosing this option takes substantially less time during the Oracle Globally Distributed Database Topology setup. +Choosing this option takes substantially less time during the Oracle Database Sharding Topology setup. **NOTE** For this step, the Persistent Volume that has the Oracle Database Gold Image is identified using its OCID. 
@@ -16,14 +16,14 @@ Choosing this option takes substantially less time during the Oracle Globally Di kubectl get pv -n shns ``` -2. This example uses `udsharding_shard_prov_clone.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: +2. This example uses `udsharding_shard_prov_clone.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the Database Gold Image present in Persistent Volume having OCID: `ocid1.volume.oc1.phx.abyhqljr3z3w72t6ay5eud7d5w3kdfhktfp6gwb6euy5tzwfaxgmbvwqlvsq` -* User-Defined Sharding is specified using `shardingType: USER` +* User Defined Sharding is specified using `shardingType: USER` NOTE: In this case, the Persistent Volume with DB Gold Image was provisioned in the Availability Domain `PHX-AD-1`. The Shards and Catalog will be provisioned in the same Availability Domain `PHX-AD-1` by cloning the database. @@ -33,7 +33,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) -**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
Use the file: [udsharding_shard_prov_clone.yaml](./udsharding_shard_prov_clone.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md index 8836baeb..b52b8745 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md @@ -1,18 +1,18 @@ -# Provisioning Oracle Globally Distributed Database with User-Defined Sharding with additional control on resources like Memory and CPU allocated to Pods +# Provisioning Oracle Sharded Database with User Defined Sharding with additional control on resources like Memory and CPU allocated to Pods **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Globally Distributed Database topology with User-Defined Sharding is deployed using Oracle Sharding controller. +In this use case, there are additional tags used to control resources such as CPU and Memory used by the different Pods when the Oracle Sharding topology with User Defined Sharding is deployed using Oracle Sharding controller. 
This example uses `udsharding_shard_prov_memory_cpu.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Tags `memory` and `cpu` to control the Memory and CPU of the PODs * Additional tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level -* User-Defined Sharding is specified using `shardingType: USER` +* User Defined Sharding is specified using `shardingType: USER` In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md index ea3a2802..640301a2 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md @@ -1,19 +1,19 @@ -# Provisioning Oracle Globally Distributed Database with User-Defined Sharding and send Notification using OCI Notification Service +# Provisioning Oracle Sharded Database with User Defined Sharding and send Notification using OCI Notification Service **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Globally Distributed Database topology provisioned using the Oracle Database sharding controller. +This use case demonstrates how to use a notification service like OCI Notification service to send an email notification when a particular operation is completed on an Oracle Database sharding topology provisioned using the Oracle Database sharding controller. 
-This example uses `udsharding_shard_prov_send_notification.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: +This example uses `udsharding_shard_prov_send_notification.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` * Database Cloning from the `BLOCK VOLUME FULL BACKUP` of the Persistent Volume that has the Database Gold Image created earlier. * OCID of the Block Volume Backup: `ocid1.volumebackup.oc1.phx.abyhqljrxtv7tu5swqb3lzc7vpzwbwzdktd2y4k2vjjy2srmgu2w7bqdftjq` * Configmap to send notification email when a particular operation is completed. For example: When a shard is added. -* User-Defined Sharding is specified using `shardingType: USER` +* User Defined Sharding is specified using `shardingType: USER` **NOTE:** @@ -68,7 +68,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. -**NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. +**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
Use the file: [udsharding_shard_prov_send_notification.yaml](./udsharding_shard_prov_send_notification.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md index 5b1d2db0..2be5ac9f 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md @@ -1,18 +1,18 @@ -# Provisioning Oracle Globally Distributed Database with User-Defined Sharding without Database Gold Image +# Provisioning Oracle Sharded Database with User Defined Sharding without Database Gold Image **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with User-Defined Sharding is deployed using Oracle Sharding controller. +In this use case, the database is created automatically using DBCA during the provisioning of the shard databases and the catalog database when the Oracle Sharding topology with User Defined Sharding is deployed using Oracle Sharding controller. **NOTE** In this use case, because DBCA creates the database automatically during the deployment, the time required to create the database is greater than the time it takes when the database is created by cloning from a Database Gold Image. 
-This example uses `udsharding_shard_prov.yaml` to provision an Oracle Globally Distributed Database topology using Oracle Sharding controller with: +This example uses `udsharding_shard_prov.yaml` to provision an Oracle Database sharding topology using Oracle Sharding controller with: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` -* User-Defined Sharding is specified using `shardingType: USER` +* User Defined Sharding is specified using `shardingType: USER` In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md index e01e606f..2c4cbfc2 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md @@ -1,16 +1,16 @@ -# Scale In - Delete an existing Shard from a working Oracle Globally Distributed Database provisioned earlier with User-Defined Sharding +# Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with User Defined Sharding **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates how to delete an existing Shard from an existing Oracle Globally Distributed Database topology with User-Defined Sharding provisioned using Oracle Database Sharding controller. 
+This use case demonstrates how to delete an existing Shard from an existing Oracle Database sharding topology with User Defined Sharding provisioned using Oracle Database Sharding controller. In this use case, the existing database Sharding is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Five Shard Database Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` +* Five sharding Pods: `shard1`,`shard2`,`shard3`,`shard4` and `shard5` * One Catalog Pod: `catalog` * Namespace: `shns` -* User-Defined Sharding is specified using `shardingType: USER` +* User Defined Sharding is specified using `shardingType: USER` In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. @@ -20,13 +20,13 @@ In this example, we are using pre-built Oracle Database and Global Data Services **NOTE:** Use tag `isDelete: enable` to delete the shard you want. -This use case deletes the shard `shard4` from the above Oracle Globally Distributed Database Topology. +This use case deletes the shard `shard4` from the above Sharding Topology. Use the file: [udsharding_shard_prov_delshard.yaml](./udsharding_shard_prov_delshard.yaml) for this use case as below: 1. Move out the chunks from the shard to be deleted to another shard. 
For example, in the current case, before deleting the `shard4`, if you want to move the chunks from `shard4` to `shard2`, then you can run the below `kubectl` command where `/u01/app/oracle/product/23ai/gsmhome_1` is the GSM HOME: ```sh - kubectl exec -it pod/gsm1-0 -n shns -- /u01/app/oracle/product/23ai/gsmhome_1/bin/gdsctl "move chunk -chunk all -source shard4_shard4pdb -target shard2_shard2pdb" + kubectl exec -it pod/gsm1-0 -n shns -- /u01/app/oracle/product/23ai/gsmhome_1/bin/gdsctl "move chunk -chunk all -source shard4_shard4pdb -target shard2_shard2pdb" ``` 2. Confirm the shard to be deleted (`shard4` in this case) is not having any chunk using below command: ```sh @@ -48,7 +48,7 @@ Use the file: [udsharding_shard_prov_delshard.yaml](./udsharding_shard_prov_dels - After you apply `udsharding_shard_prov_delshard.yaml`, the change may not be visible immediately and it may take some time for the delete operation to complete. - If the shard, that you are trying to delete, is still having chunks, then the you will see message like below in the logs of the Oracle Database Operator Pod. ```sh - DEBUG events Shard Deletion failed for [shard4]. Retry shard deletion after manually moving the chunks. Requeuing + INFO controllers.database.ShardingDatabase manual intervention required ``` In this case, you will need to first move out the chunks from the shard to be deleted using Step 2 above and then apply the file in Step 3 to delete that shard.
diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md index 371b2438..20f50b29 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md @@ -1,24 +1,24 @@ -# Scale Out - Add Shards to an existing Oracle Globally Distributed Database provisioned earlier with User-Defined Sharding +# Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with User Defined Sharding **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. -This use case demonstrates adding a new shard to an existing Oracle Globally Distributed Database topology with User-Defined Sharding provisioned earlier using Oracle Database Sharding controller. +This use case demonstrates adding a new shard to an existing Oracle Database sharding topology with User Defined Sharding provisioned earlier using Oracle Database Sharding controller. 
-In this use case, the existing Oracle Globally Distributed Database topology is having: +In this use case, the existing Oracle Database sharding topology is having: * Primary GSM Pods `gsm1` and standby GSM Pod `gsm2` -* Three Shard Database Pods: `shard1`, `shard2` and `shard3` +* Three sharding Pods: `shard1`, `shard2` and `shard3` * One Catalog Pod: `catalog` * Namespace: `shns` -* User-Defined Sharding is specified using `shardingType: USER` +* User Defined Sharding is specified using `shardingType: USER` In this example, we are using pre-built Oracle Database and Global Data Services container images available on [Oracle Container Registry](https://container-registry.oracle.com/) * To pull the above images from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) - * If the existing Oracle Globally Distributed Database Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. -This use case adds two new shards `shard4`,`shard5` to above Oracle Globally Distributed Database Topology. 
+This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. Use the file: [udsharding_shard_prov_extshard.yaml](./udsharding_shard_prov_extshard.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml index 9b565b73..d33be599 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -55,4 +55,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml index adc2271f..04ee5d95 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -80,4 +80,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml index 28f36608..5be6ecde 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -88,4 +88,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml index 2342dc55..e00d2272 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -66,4 +66,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns \ No newline at end of file diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml index f45d421f..3899f2ab 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml @@ -2,7 +2,7 @@ # Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -64,4 +64,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml index e663aa65..6c65916e 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. 
# --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -86,4 +86,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml index afd951fe..ef1b5561 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml @@ -3,7 +3,7 @@ # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # --- -apiVersion: database.oracle.com/v1alpha1 +apiVersion: database.oracle.com/v4 kind: ShardingDatabase metadata: name: shardingdatabase-sample @@ -82,4 +82,3 @@ spec: role: primary - name: oltp_ro_svc role: primary - namespace: shns diff --git a/docs/sidb/PREREQUISITES.md b/docs/sidb/PREREQUISITES.md index b904f5da..4bf09283 100644 --- a/docs/sidb/PREREQUISITES.md +++ b/docs/sidb/PREREQUISITES.md @@ -3,27 +3,26 @@ To deploy Oracle Single Instance Database in Kubernetes using the OraOperator, c * ### Prepare Oracle Container Images - Build Single Instance Database Container Images from source, following the instructions at [https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance), or - use the pre-built images available at [https://container-registry.oracle.com](https://container-registry.oracle.com) by signing in and accepting the required license agreement. 
+ You can either build Single Instance Database Container Images from the source, following the instructions at [https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance), or you can use the pre-built images available at [https://container-registry.oracle.com](https://container-registry.oracle.com) by signing in and accepting the required license agreement. - Oracle Database Releases Supported: Enterprise and Standard Edition for Oracle Database 19c, and later releases. Express Edition for Oracle Database 21.3.0 only. Oracle Database Free 23.2.0 and later Free releases + Oracle Database Releases Supported: Enterprise and Standard Edition for Oracle Database 19c, and later releases. Express Edition for Oracle Database 21.3.0 only. Oracle Database Free 23.2.0 and later Free releases Build Oracle REST Data Service Container Images from source following the instructions at [https://github.com/oracle/docker-images/tree/main/OracleRestDataServices](https://github.com/oracle/docker-images/tree/main/OracleRestDataServices). - Supported Oracle REST Data Service version is 21.4.2 + The supported Oracle REST Data Service version is 21.4.2 * ### Ensure Sufficient Disk Space in Kubernetes Worker Nodes - Provision Kubernetes worker nodes with recommended 250 GiB or more of free disk space required for pulling the base and patched database container images. If deploying on cloud you may choose to increase the custom boot volume size of the worker nodes. + Provision Kubernetes worker nodes. Oracle recommends you provision them with 250 GB or more free disk space, which is required for pulling the base and patched database container images. If you are doing a Cloud deployment, then you can choose to increase the custom boot volume size of the worker nodes.
* ### Set Up Kubernetes and Volumes for Database Persistence Set up an on-premises Kubernetes cluster, or subscribe to a managed Kubernetes service, such as Oracle Cloud Infrastructure Container Engine for Kubernetes. Use a dynamic volume provisioner or pre-provision static persistent volumes manually. These volumes are required for persistent storage of the database files. - More info on creating persistent volumes available at [https://kubernetes.io/docs/concepts/storage/persistent-volumes/](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) + For more information about creating persistent volumes, see: [https://kubernetes.io/docs/concepts/storage/persistent-volumes/](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) * ### Minikube Cluster Environment - By default, Minikube creates a node with 2GB RAM, 2 CPUs, and 20GB disk space when a cluster is created using `minikube start` command. However, these resources (particularly disk space and RAM) may not be sufficient for running and managing Oracle Database using the OraOperator. It is recommended to have larger RAM and disk space for better performance. For example, the following command creates a Minikube cluster with 8GB RAM and 100GB disk space for the Minikube VM: + By default, when you create a cluster using the `minikube start` command, Minikube creates a node with 2GB RAM, 2 CPUs, and 20GB disk space. However, these resources (particularly disk space and RAM) may not be sufficient for running and managing Oracle Database using the OraOperator. For better performance, Oracle recommends that you configure the cluster to have a larger RAM and disk space than the Minikube default.
For example, the following command creates a Minikube cluster with 8GB RAM and 100GB disk space for the Minikube VM: ``` minikube start --memory=8g --disk-size=100g diff --git a/docs/sidb/README.md b/docs/sidb/README.md index ff357195..35f42f22 100644 --- a/docs/sidb/README.md +++ b/docs/sidb/README.md @@ -12,6 +12,8 @@ Oracle Database Operator for Kubernetes (`OraOperator`) includes the Single Inst * [Pre-built Database](#pre-built-database) * [XE Database](#xe-database) * [Free Database](#free-database) + * [Free Lite Database](#free-lite-database) + * [Oracle True Cache](#oracle-true-cache) * [Connecting to Database](#connecting-to-database) * [Database Persistence (Storage) Configuration Options](#database-persistence-storage-configuration-options) * [Dynamic Persistence](#dynamic-persistence) @@ -29,17 +31,21 @@ Oracle Database Operator for Kubernetes (`OraOperator`) includes the Single Inst * [Setup Database with LoadBalancer](#setup-database-with-loadbalancer) * [Enabling TCPS Connections](#enabling-tcps-connections) * [Specifying Custom Ports](#specifying-custom-ports) - * [Setup Data Guard Configuration for a Single Instance Database (Preview status)](#setup-data-guard-configuration-for-a-single-instance-database-preview-status) + * [Setup Data Guard Configuration for a Single Instance Database](#setup-data-guard-configuration-for-a-single-instance-database) * [Create a Standby Database](#create-a-standby-database) * [Create a Data Guard Configuration](#create-a-data-guard-configuration) * [Perform a Switchover](#perform-a-switchover) - * [Patch Primary and Standby databases in Data Guard configuration](#patch-primary-and-standby-databases-in-data-guard-configuration) + * [Enable Fast-Start Failover](#enable-fast-start-failover) + * [Convert Standby to Snapshot Standby](#convert-standby-to-snapshot-standby) + * [Static Data Guard Connect String](#static-data-guard-connect-string) + * [Patch Primary and Standby 
databases](#patch-primary-and-standby-databases) * [Delete the Data Guard Configuration](#delete-the-data-guard-configuration) * [Execute Custom Scripts](#execute-custom-scripts) * [OracleRestDataService Resource](#oraclerestdataservice-resource) * [REST Enable a Database](#rest-enable-a-database) * [Provision ORDS](#provision-ords) * [Database API](#database-api) + * [MongoDB API](#mongodb-api) * [Advanced Usages](#advanced-usages) * [Oracle Data Pump](#oracle-data-pump) * [REST Enabled SQL](#rest-enabled-sql) @@ -52,7 +58,7 @@ Oracle Database Operator for Kubernetes (`OraOperator`) includes the Single Inst ## Prerequisites -Oracle strongly recommends to comply with the [prerequisites](./PREREQUISITES.md) and the following requirements +Oracle strongly recommends that you comply with the [prerequisites](./PREREQUISITES.md) and the following requirements ### Mandatory Resource Privileges @@ -67,11 +73,11 @@ Oracle strongly recommends to comply with the [prerequisites](./PREREQUISITES.md | Secrets | create delete get list patch update watch | | Events | create patch | - For managing the required levels of access, configure [role binding](../../README.md#role-binding-for-access-management) + For managing the required levels of access, configure [role binding](../../README.md#create-role-bindings-for-access-management) ### Optional Resource Privileges - Single Instance Database(sidb) controller optionally requires the following Kubernetes resource privileges depending on the functionality being used: + Single Instance Database(`sidb`) controller optionally requires the following Kubernetes resource privileges, depending on the functionality being used: | Functionality | Resources | Privileges | | --- | --- | --- | @@ -80,7 +86,7 @@ Oracle strongly recommends to comply with the [prerequisites](./PREREQUISITES.md | Custom Scripts Execution | PersistentVolumes | get list watch | - For exposing the database via Nodeport services, apply [RBAC](../../rbac/node-rbac.yaml) + 
For exposing the database using Nodeport services, apply [RBAC](../../rbac/node-rbac.yaml) ```sh kubectl apply -f rbac/node-rbac.yaml ``` @@ -88,22 +94,22 @@ Oracle strongly recommends to comply with the [prerequisites](./PREREQUISITES.md ```sh kubectl apply -f rbac/storage-class-rbac.yaml ``` - For automatic execution of custom scripts post database setup or startup, apply [RBAC](../../rbac/persistent-volume-rbac.yaml) + For automatic execution of custom scripts after database setup or startup, apply [RBAC](../../rbac/persistent-volume-rbac.yaml) ```sh kubectl apply -f rbac/persistent-volume-rbac.yaml ``` ### OpenShift Security Context Constraints - OpenShift requires additional Security Context Constraints (SCC) for deploying and managing the SingleInstanceDatabase resource. Follow these steps to create the appropriate SCCs before deploying the SingleInstanceDatabase resource. + OpenShift requires additional Security Context Constraints (SCC) for deploying and managing the `SingleInstanceDatabase` resource. To create the appropriate SCCs before deploying the `SingleInstanceDatabase` resource, complete these steps: - 1. Create a new project/namespace for deploying the SingleInstanceDatabase resource + 1. Create a new project/namespace for deploying the `SingleInstanceDatabase` resource ```sh oc new-project sidb-ns ``` - **Note:** OpenShift recommends not to deploy in namespaces starting with `kube`, `openshift` and the `default` namespace. + **Note:** OpenShift recommends that you should not deploy in namespaces starting with `kube`, `openshift` and the `default` namespace. 2. Apply the file [openshift_rbac.yaml](../../config/samples/sidb/openshift_rbac.yaml) with cluster-admin user privileges. 
@@ -111,15 +117,15 @@ Oracle strongly recommends to comply with the [prerequisites](./PREREQUISITES.md oc apply -f openshift-rbac.yaml ``` - This would result in creation of SCC (Security Context Constraints) and serviceaccount `sidb-sa` in the namespace `sidb-ns` which has access to the SCC. + Running this example procedure results in creation of SCC (Security Context Constraints) and serviceaccount `sidb-sa` in the namespace `sidb-ns`, which has access to the SCC. - **Note:** The above config yaml file will bind the SCC to the serviceaccount `sidb-sa` in namespace `sidb-ns`. For any other project/namespace update the file appropriately with the namespace before applying. + **Note:** This configuration yaml file example binds the SCC to the serviceaccount `sidb-sa` in namespace `sidb-ns`. For any other project/namespace, you must update the file appropriately with the namespace before applying this example. 3. Set the `serviceAccountName` attribute to `sidb-sa` and the namespace to `sidb-ns` in **[config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml)** before deploying the SingleInstanceDatabase resource. ## SingleInstanceDatabase Resource -The Oracle Database Operator creates the `SingleInstanceDatabase` as a custom resource. Doing this enables Oracle Database to be managed as a native Kubernetes object. We will refer `SingleInstanceDatabase` resource as Database from now onwards. +The Oracle Database Operator creates the `SingleInstanceDatabase` as a custom resource. Doing this enables Oracle Database to be managed as a native Kubernetes object. In this document, we will refer to the `SingleInstanceDatabase` resource as the database. 
### Resource Details @@ -207,22 +213,22 @@ $ kubectl describe singleinstancedatabase sidb-sample-clone ### Template YAML The template `.yaml` file for Single Instance Database (Enterprise and Standard Editions), including all the configurable options, is available at: -**[config/samples/sidb/singleinstancedatabase.yaml](./../../config/samples/sidb/singleinstancedatabase.yaml)** +**[`config/samples/sidb/singleinstancedatabase.yaml`](./../../config/samples/sidb/singleinstancedatabase.yaml)** **Note:** -The `adminPassword` field in the above `singleinstancedatabase.yaml` file refers to a secret for the SYS, SYSTEM and PDBADMIN users of the Single Instance Database. This secret is required when you provision a new database, or when you clone an existing database. +The `adminPassword` field in the above `singleinstancedatabase.yaml`example file refers to a Secret for the SYS, SYSTEM and PDBADMIN users of the Single Instance Database. This Secret is required when you provision a new database, or when you clone an existing database. -Create this secret using the following command as an example: +Create this Secret using the following command as an example: kubectl create secret generic db-admin-secret --from-literal=oracle_pwd= -This command creates a secret named `db-admin-secret`, with the key `oracle_pwd` mapped to the actual password specified in the command. +This command creates a Secret named `db-admin-secret`, with the key `oracle_pwd` mapped to the actual password specified in the command. ### Create a Database #### New Database -To provision a new database instance on the Kubernetes cluster, use the example **[config/samples/sidb/singleinstancedatabase_create.yaml](../../config/samples/sidb/singleinstancedatabase_create.yaml)**. +To provision a new database instance on the Kubernetes cluster, use the example **[`config/samples/sidb/singleinstancedatabase_create.yaml`](../../config/samples/sidb/singleinstancedatabase_create.yaml)**. 1. 
Log into [Oracle Container Registry](https://container-registry.oracle.com/) and accept the license agreement for the Database image; ignore if you have accepted the license agreement already. @@ -254,15 +260,15 @@ To provision a new database instance on the Kubernetes cluster, use the example ``` **Note:** -- For ease of use, the storage class **oci-bv** is specified in the **[singleinstancedatabase_create.yaml](../../config/samples/sidb/singleinstancedatabase_create.yaml)**. This storage class facilitates dynamic provisioning of the OCI block volumes on the Oracle OKE for persistent storage of the database. The supported access mode for this class is `ReadWriteOnce`. For other cloud providers, you can similarly use their dynamic provisioning storage classes. +- For ease of use, the storage class **oci-bv** is specified in the **[`singleinstancedatabase_create.yaml`](../../config/samples/sidb/singleinstancedatabase_create.yaml)**. This storage class facilitates dynamic provisioning of the OCI block volumes on the Oracle OKE for persistent storage of the database. The supported access mode for this class is `ReadWriteOnce`. For other cloud providers, you can similarly use their dynamic provisioning storage classes. - It is beneficial to have the database replica pods more than or equal to the number of available nodes if `ReadWriteMany` access mode is used with the OCI NFS volume. By doing so, the pods get distributed on different nodes and the database image is downloaded on all those nodes. This helps in reducing time for the database fail-over if the active database pod dies. - Supports Oracle Database Enterprise Edition (19.3.0), and later releases. -- To pull the database image faster from the container registry, so that you can bring up the SIDB instance quickly, you can use the container-registry mirror of the corresponding cluster's region. For example, if the cluster exists in Mumbai region, then you can use the `container-registry-bom.oracle.com` mirror. 
For more information on container-registry mirrors, follow the link [https://blogs.oracle.com/wim/post/oracle-container-registry-mirrors-in-oracle-cloud-infrastructure](https://blogs.oracle.com/wim/post/oracle-container-registry-mirrors-in-oracle-cloud-infrastructure). -- To update the init parameters like `sgaTarget` and `pgaAggregateTarget`, refer the `initParams` section of the [singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml) file. +- To pull the database image faster from the container registry, so that you can bring up the SIDB instance quickly, you can use the container-registry mirror of the corresponding cluster's region. For example, if the cluster exists in Mumbai region, then you can use the `container-registry-bom.oracle.com` mirror. For more information on container-registry mirrors, see: [https://blogs.oracle.com/wim/post/oracle-container-registry-mirrors-in-oracle-cloud-infrastructure](https://blogs.oracle.com/wim/post/oracle-container-registry-mirrors-in-oracle-cloud-infrastructure). +- To update the initialization (init) parameters, such as `sgaTarget` and `pgaAggregateTarget`, see the `initParams` section of the [`singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file. #### Pre-built Database -To provision a new pre-built database instance, use the sample **[config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml)** file. For example: +To provision a new pre-built database instance, use the sample **[`config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml`](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml)** file.
For example: ```sh $ kubectl apply -f singleinstancedatabase_prebuiltdb.yaml @@ -274,32 +280,45 @@ This pre-built image includes the data files of the database inside the image it To build the pre-built database image for the Enterprise/Standard edition, follow these instructions: [Pre-built Database (prebuiltdb) Extension](https://github.com/oracle/docker-images/blob/main/OracleDatabase/SingleInstance/extensions/prebuiltdb/README.md). #### XE Database -To provision new Oracle Database Express Edition (XE) database, use the sample **[config/samples/sidb/singleinstancedatabase_express.yaml](../../config/samples/sidb/singleinstancedatabase_express.yaml)** file. For example: +To provision a new Oracle Database Express Edition (XE) database, use the sample **[config/samples/sidb/singleinstancedatabase_express.yaml](../../config/samples/sidb/singleinstancedatabase_express.yaml)** file. For example: kubectl apply -f singleinstancedatabase_express.yaml -This command pulls the XE image uploaded on the [Oracle Container Registry](https://container-registry.oracle.com/). +This command pulls the XE image available in [Oracle Container Registry](https://container-registry.oracle.com/). **Note:** -- Provisioning Oracle Database express edition is supported for release 21c (21.3.0) only. +- Provisioning Oracle Database Express Edition is supported for release 21c (21.3.0) only. Oracle Database Free replaces Oracle Database Express Edition. - For XE database, only single replica mode (i.e. `replicas: 1`) is supported. -- For XE database, you **cannot change** the init parameters i.e. `cpuCount, processes, sgaTarget or pgaAggregateTarget`. +- For XE database, you **cannot change** the init parameters, such as `cpuCount, processes, sgaTarget or pgaAggregateTarget`. #### Free Database -To provision new Oracle Database Free database, use the sample **[config/samples/sidb/singleinstancedatabase_free.yaml](../../config/samples/sidb/singleinstancedatabase_free.yaml)** file. 
For example: +To provision new Oracle Database Free, use the sample **[config/samples/sidb/singleinstancedatabase_free.yaml](../../config/samples/sidb/singleinstancedatabase_free.yaml)** file. For example: kubectl apply -f singleinstancedatabase_free.yaml -This command pulls the Free image uploaded on the [Oracle Container Registry](https://container-registry.oracle.com/). +This command pulls the Free image available in [Oracle Container Registry](https://container-registry.oracle.com/). + +#### Free Lite Database +To provision new Oracle Database Free Lite, use the sample **[config/samples/sidb/singleinstancedatabase_free-lite.yaml](../../config/samples/sidb/singleinstancedatabase_free-lite.yaml)** file. For example: + + kubectl apply -f singleinstancedatabase_free-lite.yaml + +This command pulls the Free lite image available in [Oracle Container Registry](https://container-registry.oracle.com/). **Note:** - Provisioning Oracle Database Free is supported for release 23.3.0 and later releases. -- For Free database, only single replica mode (i.e. `replicas: 1`) is supported. -- For Free database, you **cannot change** the init parameters i.e. `cpuCount, processes, sgaTarget or pgaAggregateTarget`. -- Oracle Enterprise Manager Express (OEM Express) is not supported from release 23.3.0 and later releases. +- For Free database, only single replica mode (such as `replicas: 1`) is supported. +- For Free database, you **cannot change** the init parameters. These include parameters such as `cpuCount, processes, sgaTarget or pgaAggregateTarget`. +- Oracle Enterprise Manager Express (OEM Express) is not supported in release 23.3.0 and later releases. + +#### Oracle True Cache +Oracle True Cache is an in-memory, consistent, and automatically managed cache for Oracle Database. 
+To provision a True Cache instance for Oracle Free Database in Kubernetes, use the sample **[`config/samples/sidb/singleinstancedatabase_free-truecache.yaml`](../../config/samples/sidb/singleinstancedatabase_free-truecache.yaml)** file. For example + + kubectl apply -f singleinstancedatabase_free-truecache.yaml #### Additional Information -You are required to specify the database admin password secret in the corresponding YAML file. The default values mentioned in the `adminPassword.secretName` fields of [singleinstancedatabase_create.yaml](../../config/samples/sidb/singleinstancedatabase_create.yaml), [singleinstancedatabase_prebuiltdb.yaml](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml), [singleinstancedatabase_express.yaml](../../config/samples/sidb/singleinstancedatabase_express.yaml) and [singleinstancedatabse_free.yaml](../../config/samples/sidb/singleinstancedatabase_free.yaml) files are `db-admin-secret`, `prebuiltdb-admin-secret`, `xedb-admin-secret` and `free-admin-secret` respectively. You can create these secrets manually by using the sample command mentioned in the [Template YAML](#template-yaml) section. Alternatively, you can create these secrets by filling the passwords in the **[singleinstancedatabase_secrets.yaml](../../config/samples/sidb/singleinstancedatabase_secrets.yaml)** file and applying it using the command below: +You are required to specify the database administrative user (admin) password Secret in the corresponding YAML file. 
The default values mentioned in the `adminPassword.secretName` fields of [`singleinstancedatabase_create.yaml`](../../config/samples/sidb/singleinstancedatabase_create.yaml), [`singleinstancedatabase_prebuiltdb.yaml`](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml), [`singleinstancedatabase_express.yaml`](../../config/samples/sidb/singleinstancedatabase_express.yaml) and [`singleinstancedatabse_free.yaml`](../../config/samples/sidb/singleinstancedatabase_free.yaml) files are `db-admin-secret`, `prebuiltdb-admin-secret`, `xedb-admin-secret` and `free-admin-secret` respectively. You can create these Secrets manually by using the sample command mentioned in the [`Template YAML`](#template-yaml) section. Alternatively, you can create these Secrets by filling in the passwords in the **[`singleinstancedatabase_secrets.yaml`](../../config/samples/sidb/singleinstancedatabase_secrets.yaml)** file and applying them using the following command: ```bash kubectl apply -f singleinstancedatabase_secrets.yaml @@ -307,7 +326,7 @@ kubectl apply -f singleinstancedatabase_secrets.yaml ### Connecting to Database -Creating a new database instance takes a while. When the `status` column returns the response `Healthy`, the Database is open for connections. +Creating a new database instance takes a while. When the `status` column returns the response `Healthy`, the database is open for connections. ```sh $ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.status}" @@ -315,7 +334,7 @@ $ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.status}" Healthy ``` -Clients can get the connect-string to the CDB from `.status.connectString` and PDB from `.status.pdbConnectString`. For example: +Clients can obtain the connect string to the CDB from `.status.connectString`, and the connect string to the PDB from `.status.pdbConnectString`. 
For example: ```sh $ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.connectString}" @@ -328,7 +347,7 @@ $ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.pdbConnec 10.0.25.54:1521/ORCLPDB ``` -Use any supported client or SQLPlus to connect to the database using the above connect strings as follows +To connect to the database using the connect strings returned by the commands above, you can use any supported client, or use SQLPlus. For example: ```sh $ sqlplus sys/<.spec.adminPassword>@10.0.25.54:1521/ORCL as sysdba @@ -356,7 +375,7 @@ $ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.oemExpres **Note:** OEM Express is not available for 23.3.0 and later releases ### Database Persistence (Storage) Configuration Options -The database persistence can be achieved in the following two ways: +You can configure database persistence in the following two ways: - Dynamic Persistence Provisioning - Static Persistence Provisioning @@ -379,8 +398,8 @@ $ kubectl patch singleinstancedatabase sidb-sample -p '{"spec":{"persistence":{" - User can only scale up a volume/storage and not scale down #### Static Persistence -In **Static Persistence Provisioning**, you have to create a volume manually, and then use the name of this volume with the `<.spec.persistence.datafilesVolumeName>` field which corresponds to the `datafilesVolumeName` field of the persistence section in the **[singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml)**. The `Reclaim Policy` of such volume can be set to `Retain`. So, this volume does not get deleted with the deletion of its corresponding deployment. 
-For example in **Minikube**, a persistent volume can be provisioned using the sample yaml file below: +In **Static Persistence Provisioning**, you must create a volume manually, and then use the name of this volume with the `<.spec.persistence.datafilesVolumeName>` field, which corresponds to the `datafilesVolumeName` field of the persistence section in the **[`singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml)**. The `Reclaim Policy` of such volumes can be set to `Retain`. When this policy is set, the volume is not deleted when its corresponding deployment is deleted. +For example in **Minikube**, a persistent volume can be provisioned using the following yaml file example: ```yaml apiVersion: v1 kind: PersistentVolume @@ -395,7 +414,7 @@ spec: hostPath: path: /data/oradata ``` -The persistent volume name (i.e. db-vol) can be mentioned in the `datafilesVolumeName` field of the **[singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml)**. `storageClass` field is not required in this case, and can be left empty. +The persistent volume name (in this case, `db-vol`) can be mentioned in the `datafilesVolumeName` field of the **[`singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml)**. `storageClass` field is not required in this case, and can be left empty. Static Persistence Provisioning in Oracle Cloud Infrastructure (OCI) is explained in the following subsections: @@ -452,7 +471,7 @@ spec: - Whenever a mount target is provisioned in OCI, its `Reported Size (GiB)` values are very large. This is visible on the mount target page when logged in to the OCI console. Some applications will fail to install if the results of a space requirements check show too much available disk space. 
So in the OCI Console, click the little "Pencil" icon besides the **Reported Size** parameter of the Mount Target to specify, in gigabytes (GiB), the maximum capacity reported by file systems exported through this mount target. This setting does not limit the actual amount of data you can store. -- Make sure to open the required ports to access the NFS volume from the K8S cluster: add the required ports to the security list of the subnet where your K8S nodes are connected to; see **[here](https://docs.oracle.com/en-us/iaas/Content/File/Tasks/securitylistsfilestorage.htm)** for the details. +- You must open the required ports to access the NFS volume from the K8S cluster. Add the required ports to the security list of the subnet to which your K8S nodes are connected. For more information, see **[Security Lists File Storage](https://docs.oracle.com/en-us/iaas/Content/File/Tasks/securitylistsfilestorage.htm)** for the details. ### Configuring a Database The `OraOperator` facilitates you to configure the database. Various database configuration options are explained in the following subsections: @@ -508,7 +527,7 @@ The following attributes cannot be modified after creating the Single Instance D - `pdbName` - `primaryDatabaseRef` -If you attempt to changing one of these attributes, then you receive an error similar to the following: +If you attempt to change one of these attributes, then you receive an error similar to the following: ```sh $ kubectl --type=merge -p '{"spec":{"sid":"ORCL1"}}' patch singleinstancedatabase sidb-sample @@ -520,7 +539,7 @@ $ kubectl --type=merge -p '{"spec":{"sid":"ORCL1"}}' patch singleinstancedatabas To create copies of your existing database quickly, you can use the cloning functionality. A cloned database is an exact, block-for-block copy of the source database. Cloning is much faster than creating a fresh database and copying over the data. 
-To quickly clone the existing database sidb-sample created above, use the sample **[config/samples/sidb/singleinstancedatabase_clone.yaml](../../config/samples/sidb/singleinstancedatabase_clone.yaml)** file. +To quickly clone the existing database `sidb-sample` we previously created for this document, use the sample **[`config/samples/sidb/singleinstancedatabase_clone.yaml`](../../config/samples/sidb/singleinstancedatabase_clone.yaml)** file. For example: @@ -544,7 +563,7 @@ Patched Oracle Docker images can be built by using this [patching extension](htt #### Patch -To patch an existing database, edit and apply the **[config/samples/sidb/singleinstancedatabase_patch.yaml](../../config/samples/sidb/singleinstancedatabase_patch.yaml)** file of the database resource/object either by specifying a new release update for image attributes, or by running the following command: +To patch an existing database, edit and apply the **[`config/samples/sidb/singleinstancedatabase_patch.yaml`](../../config/samples/sidb/singleinstancedatabase_patch.yaml)** file of the database resource/object either by specifying a new release update for image attributes, or by running the following command: ```sh kubectl --type=merge -p '{"spec":{"image":{"pullFrom":"patched-image:tag","pullSecrets":"pull-secret"}}}' patch singleinstancedatabase sidb-sample @@ -556,7 +575,7 @@ singleinstancedatabase.database.oracle.com/sidb-sample patched After patching is complete, the database pods are restarted with the new release update image. **Note:** -- Only enterprise and standard editions support patching. +- Only Enterprise and Standard Editions support patching. 
#### Patch after Cloning @@ -578,7 +597,7 @@ $ kubectl get singleinstancedatabase sidb-sample -o "jsonpath={.status.releaseUp ``` #### Rollback -You can roll back to a prior database version by specifying the old image in the `image` field of the **[config/samples/sidb/singleinstancedatabase_patch.yaml](../../config/samples/sidb/singleinstancedatabase_patch.yaml)** file, and applying it by the following command: +You can roll back to a prior database version by specifying the old image in the `image` field of the **[`config/samples/sidb/singleinstancedatabase_patch.yaml`](../../config/samples/sidb/singleinstancedatabase_patch.yaml)** file, and applying it using the following command: ```bash kubectl apply -f singleinstancedatabase_patch.yaml @@ -594,12 +613,12 @@ singleinstancedatabase.database.oracle.com/sidb-sample patched ``` ### Delete a Database -Please run the following command to delete the database: +To delete the database, run the following command : ```bash kubectl delete singleinstancedatabase.database.oracle.com sidb-sample ``` -The command above will delete the database pods and associated service. +This command will delete the database pods and associated service. ### Advanced Database Configurations Some advanced database configuration scenarios are as follows: @@ -623,17 +642,20 @@ The following table depicts the fail over matrix for any destructive operation t | PDB close | No | **Note:** -- Maintence shutdown/startup can be executed using the scripts /home/oracle/shutDown.sh and /home/oracle/startUp.sh +- Maintence shutdown/startup can be run by using the scripts `/home/oracle/shutDown.sh` and `/home/oracle/startUp.sh` - This functionality requires the [k8s extension](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/k8s) extended images. The database image from the container registry `container-registry.oracle.com` includes the K8s extension. 
- Because Oracle Database Express Edition (XE) does not support [k8s extension](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance/extensions/k8s), it does not support multiple replicas. -- If the `ReadWriteOnce` access mode is used, all the replicas will be scheduled on the same node where the persistent volume would be mounted. -- If the `ReadWriteMany` access mode is used, all the replicas will be distributed on different nodes. So, it is recommended to have replicas more than or equal to the number of the nodes as the database image is downloaded on all those nodes. This is beneficial in quick cold fail-over scenario (when the active pod dies) as the image would already be available on that node. +- If the `ReadWriteOnce` access mode is used, then all the replicas will be scheduled on the same node where the persistent volume would be mounted. +- If the `ReadWriteMany` access mode is used, then all the replicas will be distributed on different nodes. For this reason, Oracle recommends that you have replicas more than or equal to the number of the nodes, because the database image is downloaded on all those nodes. This is beneficial in quick cold fail-over scenario (when the active pod dies) as the image would already be available on that node. + +#### Database Pod Resource Management +When creating a Single Instance Database, you can specify the CPU and memory resources needed by the database pod. These specified resources are passed to the `kube-scheduler` so that the pod is scheduled on one of the pods that has the required resources available. To use database pod resource management, specify values for the `resources` attributes in the [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file, and apply it. #### Database Pod Resource Management When creating a Single Instance Database you can specify the cpu and memory resources needed by the database pod. 
These specified resources are passed to the `kube-scheduler` so that the pod gets scheduled on one of the pods that has the required resources available. To use database pod resource management specify values for the `resources` attributes in the [config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml) file, and apply it. #### Setup Database with LoadBalancer -For the Single Instance Database, the default service is the `NodePort` service. You can enable the `LoadBalancer` service by using `kubectl patch` command. +For the Single Instance Database, the default service is the `NodePort` service. You can enable the `LoadBalancer` service by using the `kubectl patch` command. For example: @@ -644,7 +666,7 @@ $ kubectl --type=merge -p '{"spec":{"loadBalancer": true}}' patch singleinstance ``` ### Enabling TCPS Connections -You can enable TCPS connections in the database by setting the `enableTCPS` field to `true` in the [config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml) file, and applying it. +You can enable TCPS connections in the database by setting the `enableTCPS` field to `true` in the [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file, and applying it. Alternatively, you can use the following command: ```bash @@ -684,44 +706,45 @@ true cd export TNS_ADMIN=$(pwd) ``` - After this, connect using SQL\*Plus using the following sample commands: + After this, connect with SQL*Plus, using the following example commands: ```bash sqlplus sys@ORCL1 as sysdba ``` ### Specifying Custom Ports -As mentioned in the section [Setup Database with LoadBalancer](#setup-database-with-loadbalancer), there are two kubernetes services possible for the database: NodePort and LoadBalancer. 
You can specify which port to use with these services by editing the `listenerPort` and `tcpsListenerPort` fields of the [config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml) file. +As mentioned in the section [Setup Database with LoadBalancer](#setup-database-with-loadbalancer), there are two kubernetes services possible for the database: NodePort and LoadBalancer. You can specify which port to use with these services by editing the `listenerPort` and `tcpsListenerPort` fields of the [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file. `listenerPort` is intended for normal database connections. Similarly, `tcpsListenerPort` is intended for TCPS database connections. -If the `LoadBalancer` is enabled, the `listenerPort`, and `tcpsListenerPort` will be the opened ports on the Load Balancer for normal and TCPS database connections respectively. The default values of `listenerPort` and `tcpsListenerPort` are 1521 and 2484 respectively when the `LoadBalancer` is enabled. +If the `LoadBalancer` is enabled, then the `listenerPort`, and `tcpsListenerPort` will be the opened ports on the Load Balancer for normal and TCPS database connections respectively. When the `LoadBalancer` is enabled, the default values of `listenerPort` and `tcpsListenerPort` are 1521 and 2484. -In case of `NodePort` service, `listenerPort`, and `tcpsListenerPort` will be the opened ports on the Kubernetes nodes for for normal and TCPS database connections respectively. In this case, the allowed range for the `listenerPort`, and `tcpsListenerPort` is 30000-32767. +If the `NodePort` service is enabled, then the `listenerPort`, and `tcpsListenerPort` will be the opened ports on the Kubernetes nodes for for normal and TCPS database connections respectively. In this case, the allowed range for the `listenerPort`, and `tcpsListenerPort` is 30000-32767. 
**Note:** -- `listenerPort` and `tcpsListenerPort` can not have same values. -- `tcpsListenerPort` will come into effect only when TCPS connections are enabled (i.e. `enableTCPS` field is set in [config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml) file). -- If TCPS connections are enabled, and `listenerPort` is commented/removed in the [config/samples/sidb/singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml) file, only TCPS endpoint will be exposed. -- If LoadBalancer is enabled, and either `listenerPort` or `tcpsListenerPort` is changed, then it takes some time to complete the work requests (drain existing backend sets and create new ones). In this time, the database connectivity is broken. Although, SingleInstanceDatabase and LoadBalancer remain in the healthy state, you can check the progress of the work requests by logging into the cloud provider's console and checking the corresponding LoadBalancer. +- `listenerPort` and `tcpsListenerPort` cannot have same values. +- `tcpsListenerPort` will come into effect only when TCPS connections are enabled (specifically, the `enableTCPS` field is set in [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file). +- If TCPS connections are enabled, and `listenerPort` is commented or removed in the [`config/samples/sidb/singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml) file, then only the TCPS endpoint will be exposed. +- If LoadBalancer is enabled, and either `listenerPort` or `tcpsListenerPort` is changed, then it takes some time to complete the work requests (drain existing backend sets and create new ones). During this time, the database connectivity is broken, although `SingleInstanceDatabase` and `LoadBalancer` remain in a healthy state. 
To check the progress of the work requests, you can log in to the Cloud provider's console and check the corresponding LoadBalancer.
+- The `adminPassword` field of the above [`config/samples/sidb/singleinstancedatabase_standby.yaml`](../../config/samples/sidb/singleinstancedatabase_standby.yaml) contains an admin password Secret of the primary database referred to for Standby Database creation. By default `keepSecret` is set to `true`, which means that the Secret is retained. If you want the Secret to be deleted after the database pod becomes ready, set the `keepSecret` attribute of the `adminPassword` field to `false`.
+- Specify the primary database with which the standby database is associated in the `.spec.primaryDatabaseRef` field of the yaml file.
For example: ```sh $ kubectl get singleinstancedatabase stdby-1 -o "jsonpath={.status.status}" @@ -754,23 +777,23 @@ $ kubectl get singleinstancedatabase stdby-1 -o "jsonpath={.status.status}" #### Template YAML -After creating standbys, setup a dataguard configuration with protection mode and switch over capability using the following sample yaml. -[config/samples/sidb/dataguardbroker.yaml](./../../config/samples/sidb/dataguardbroker.yaml) +After creating standbys, set up an Oracle Data Guard (Data Guard) configuration with protection mode, and switch over capability using the following example YAML: +[`config/samples/sidb/dataguardbroker.yaml`](./../../config/samples/sidb/dataguardbroker.yaml) #### Create DataGuardBroker Resource -Provision a new DataguardBroker custom resource for a single instance database(`.spec.primaryDatabaseRef`) by specifying appropriate values for the primary and standby databases in the example `.yaml` file, and running the following command: +To use the Data Guard broker, provision a new `dataguardbroker` custom resource for a single instance database(`.spec.primaryDatabaseRef`) by specifying the appropriate values for the primary and standby databases in the example `.yaml` file, and running the following command: ```sh $ kubectl create -f dataguardbroker.yaml dataguardbroker.database.oracle.com/dataguardbroker-sample created ``` -**Note:** The following attributes cannot be patched post DataguardBroker resource creation : `primaryDatabaseRef, protectionMode` +**Note:** The following attributes cannot be patched after you create the `dataguardbroker` resource: `primaryDatabaseRef, protectionMode` #### DataguardBroker List -To list the DataguardBroker resources, use the following command: +To list the Data Guard broker resources, use the following command: ```sh $ kubectl get dataguardbroker -o name @@ -780,6 +803,7 @@ To list the DataguardBroker resources, use the following command: ``` #### Quick Status +You can obtain a quick status 
of Data Guard broker by using the following command: ```sh $ kubectl get dataguardbroker dataguardbroker-sample @@ -790,6 +814,7 @@ To list the DataguardBroker resources, use the following command: ``` #### Detailed Status +To obtain more detailed Data Guard broker status, use this command: ```sh $ kubectl describe dataguardbroker dataguardbroker-sample @@ -827,8 +852,7 @@ To list the DataguardBroker resources, use the following command: Keep Secret: true Secret Key: oracle_pwd Secret Name: db-secret - Fast Start Fail Over: - Enable: true + Fast Start Failover: false Primary Database Ref: sidb-sample Protection Mode: MaxAvailability Set As Primary Database: @@ -838,6 +862,7 @@ To list the DataguardBroker resources, use the following command: Status: Cluster Connect String: dataguardbroker-sample.default:1521/DATAGUARD External Connect String: 10.0.25.85:31167/DATAGUARD + Fast Start Failover: false Primary Database: OR19E3 Standby Databases: OR19E3S1,OR19E3S2 Status: Healthy @@ -850,9 +875,9 @@ To list the DataguardBroker resources, use the following command: ### Perform a Switchover -Specify the approppriate SID (SID of one of `.spec.primaryDatabaseRef` , `.spec.standbyDatabaseRefs[]`) to be set primary in the `.spec.setAsPrimaryDatabase` of [dataguardbroker.yaml](./../../config/samples/sidb/dataguardbroker.yaml) and apply the yaml file. +Specify the approppriate database system identifier (SID) (the SID of one of `.spec.primaryDatabaseRef` , `.spec.standbyDatabaseRefs[]`) to be set primary in the `.spec.setAsPrimaryDatabase` of [`dataguardbroker.yaml`](./../../config/samples/sidb/dataguardbroker.yaml) and apply the yaml file. -The database will be set to primary. Ignored if the database is already primary. +When you apply the YAML file, the database you specify will be set to primary. 
However, if the database specified with the `apply` command is already the primary, then this command has no effect: ```sh $ kubectl apply -f dataguardbroker.yaml @@ -860,7 +885,7 @@ $ kubectl apply -f dataguardbroker.yaml dataguardbroker.database.oracle.com/dataguardbroker-sample apply ``` -Or use the patch command +You can also use the patch command ```sh $ kubectl --type=merge -p '{"spec":{"setAsPrimaryDatabase":"ORCLS1"}}' patch dataguardbroker dataguardbroker-sample @@ -868,40 +893,87 @@ $ kubectl --type=merge -p '{"spec":{"setAsPrimaryDatabase":"ORCLS1"}}' patch dat dataguardbroker.database.oracle.com/dataguardbroker-sample patched ``` -#### Static Primary Database Connection String +### Enable Fast-Start Failover + +Oracle Data Guard Fast-Start Failover (FSFO) monitors your Oracle Data Guard environments and initiates an automatic failover in the case of an outage. +To enable FSFO, ensure the primary database is in the primary role, set the attribute `.spec.fastStartFailover` to `true` in [`datguardbroker.yaml`](./../../config/samples/sidb/dataguardbroker.yaml), and then apply it. For example: + +```sh +$ kubectl apply -f dataguardbroker.yaml + + dataguardbroker.database.oracle.com/dataguardbroker-sample configured +``` + +You can also use the patch command: + +```sh +$ kubectl --type=merge -p '{"spec":{"fastStartFailover": true}}' patch dataguardbroker dataguardbroker-sample + + dataguardbroker.database.oracle.com/dataguardbroker-sample patched +``` + +Applying this results in the creation of a pod running the Observer. The Observer is a component of the DGMGRL interface, which monitors the availability of the primary database. + +**Note:** When the attribute `fastStartFailover` is `true`, then performing a switchover by specifying `setAsPrimaryDatabase` is not allowed. + +### Convert Standby to Snapshot Standby + +A snapshot standby is a fully updatable standby database that can be used development and testing. 
It receives and archives, but does not apply redo data from a primary database. The redo data received from the primary database is applied after a snapshot standby database is converted back into a physical standby database, and after discarding all local updates to the snapshot standby database. + +To convert a standby database to a snapshot standby, Ensure Fast-Start Failover is disabled, and tshen set the attribute `.spec.convertToSnapshotStandby` to `true` in [`singleinstancedatabase.yaml`](./../../config/samples/sidb/singleinstancedatabase.yaml) before applying it. For example: + +```sh +$ kubectl apply -f singleinstancedatabase.yaml + + singleinstancedatabase.database.oracle.com/sidb-sample configured +``` - External and internal (running in Kubernetes pods) clients can connect to the primary database using `.status.connectString` and `.status.clusterConnectString` of the DataguardBroker resource respectively. These connection strings are fixed for the DataguardBroker resource and will not change on switchover. They can be queried using the following command +You can also use the patch command: + +```sh +$ kubectl --type=merge -p '{"spec":{"convertToSnapshotStandby":true}}' patch singleinstancedatabase sidb-sample + + singleinstancedatabase.database.oracle.com/sidb-sample patched +``` + +### Static Data Guard Connect String + + External and internal (running in pods) applications can always connect to the database in the primary role by using `.status.externalConnectString` and `.status.clusterConnectString` of the Data Guard broker resource respectively. These connect strings are fixed for the Data Guard broker resource, and will not change on switchover or failover. 
The external connect string can be obtained using the following command: ```sh $ kubectl get dataguardbroker dataguardbroker-sample -o "jsonpath={.status.externalConnectString}" 10.0.25.87:1521/DATAGUARD ``` - The above connection string will always automatically route to the Primary database not requiring clients to change the connection string after switchover + This connect string will always automatically route to the database in the primary role. Client applications can be totally agnostic of the databases in the Oracle Data Guard configuration. Their number or host/IP details are not needed in the connect string. -### Patch Primary and Standby databases in Data Guard configuration +### Patch Primary and Standby databases Databases (both primary and standby) running in you cluster and managed by the Oracle Database operator can be patched between release updates of the same major release. -To patch an existing database, edit and apply the **[config/samples/sidb/singleinstancedatabase_patch.yaml](../../config/samples/sidb/singleinstancedatabase_patch.yaml)** file of the database resource/object either by specifying a new release update for image attributes, or by running the following command: +To patch an existing database, edit and apply the **[`config/samples/sidb/singleinstancedatabase_patch.yaml`](../../config/samples/sidb/singleinstancedatabase_patch.yaml)** file of the database resource/object either by specifying a new release update for image attributes, or by running the following command: ```sh kubectl --type=merge -p '{"spec":{"image":{"pullFrom":"patched-image:tag","pullSecrets":"pull-secret"}}}' patch singleinstancedatabase ``` -Follow these steps for patching databases configured with the dataguard broker: -1. First patch all the standby databases by replacing the image with the new release update image -2. Perform switch over of the primary to one of the standby databases -3. 
Now patch the original primary database (currently standby after #2) - After #3 the software for primary and standby databases is at the same release update -4. Now bounce the current primary database by updating the replica count to 0 and then 1 - #4 will trigger a datapatch execution resulting in patching of the datafiles -5. Finally perform switch over of the current primary back to the original primary (current standby) +Follow these steps for patching databases configured with the Data Guard broker: +1. Ensure Fast-Start Failover is disabled by running the following command +```sh + kubectl patch dataguardbroker dataguardbroker-sample -p '{"spec":{"fastStartFailover": false}}' --type=merge +``` +2. Patch all the standby databases by replacing the image with the new release update image. +3. Perform switchover of the primary to one of the standby databases. +4. Patch the original primary database (currently standby after #2) + After step 3, the software for primary and standby databases is at the same release update +5. Bounce the current primary database by updating the replica count to 0 and then 1 + Step 5 will trigger a datapatch execution, which results in patching the datafiles +6. Finally, perform switch over of the current primary back to the original primary (current standby) ### Delete the Data Guard Configuration -To delete a standby or primary database configured for Data Guard, delete the dataguardbroker resource first followed by the standby databases and finally the primary database +To delete a standby or primary database configured for Oracle Data Guard, delete the `dataguardbroker` resource. After that is done, delete the standby databases, and then finally the primary database. 
#### Delete DataguardBroker Resource ```sh @@ -910,8 +982,7 @@ $ kubectl delete dataguardbroker dgbroker-sample dataguardbroker.database.oracle.com/dgbroker-sample deleted ``` -**Note:** If a switch over to standby was performed, make sure to switch back to the original primary database before deleting the dataguard broker resource - +**Note:** If a switchover to standby was performed, then ensure that you switch back to the original primary database before deleting the Data Guard broker resource. For example: #### Delete Standby Database ```sh $ kubectl delete singleinstancedatabase stdby-1 @@ -921,15 +992,15 @@ $ kubectl delete singleinstancedatabase stdby-1 ### Execute Custom Scripts -Custom scripts (sql and/or shell scripts) can be executed after the initial database setup and/or after each startup of the database. SQL scripts will be executed as sysdba, shell scripts will be executed as the current user. To ensure proper order it is recommended to prefix your scripts with a number. For example `01_users.sql`, `02_permissions.sql`, etc. Place all such scripts in setup and startup folders created in a persistent volume to execute them post setup and post startup respectively. +You can set up custom scripts (SQL, shell scripts, or both) to run after the initial database setup, and to have scripts run after each startup of the database. SQL scripts will be executed as `sysdba`, and shell scripts will be executed as the current user. To ensure proper order, Oracle recommends that you prefix your scripts with a number. For example: `01_users.sql`, `02_permissions.sql`, and son on. To ensure that these scripts are available to run after setup or after each database startup, place all such scripts in setup and startup folders created in a persistent volume. 
-Create a persistent volume using [static provisioning](#static-persistence) and then specify the name of this volume with the `<.spec.persistence.scriptsVolumeName>` field which corresponds to the `scriptsVolumeName` field of the persistence section in the **[singleinstancedatabase.yaml](../../config/samples/sidb/singleinstancedatabase.yaml)**. +Create a persistent volume by using [static provisioning](#static-persistence) and then specify the name of this volume with the `<.spec.persistence.scriptsVolumeName>` field which corresponds to the `scriptsVolumeName` field of the persistence section in the **[`singleinstancedatabase.yaml`](../../config/samples/sidb/singleinstancedatabase.yaml)**. -**Note:** Executing custom scripts requires read and list access for persistent volumes as mentioned in [prerequisites](#prerequisites) +**Note:** Running custom scripts requires read and list access for persistent volumes, as mentioned in [prerequisites](#prerequisites) ## OracleRestDataService Resource -The Oracle Database Operator creates the `OracleRestDataService` as a custom resource. We will refer `OracleRestDataService` as ORDS from now onwards. Creating ORDS as a custom resource enables the RESTful API access to the Oracle Database in K8s and enables it to be managed as a native Kubernetes object. +The Oracle Database Operator creates the `OracleRestDataService` as a custom resource. In this documeent, we will refer to `OracleRestDataService` as ORDS. Creating ORDS as a custom resource enables the RESTful API access to the Oracle Database in K8s, and enables it to be managed as a native Kubernetes object. 
### Resource Details @@ -950,7 +1021,7 @@ To obtain a quick status check of the ORDS service, use the following command: $ kubectl get oraclerestdataservice ords-sample NAME STATUS DATABASE DATABASE API URL DATABASE ACTIONS URL APEX URL -ords-sample Healthy sidb-sample https://10.0.25.54:8443/ords/ORCLPDB1/_/db-api/stable/ https://10.0.25.54:8443/ords/sql-developer https://10.0.25.54:8443/ords/ORCLPDB1/apex +ords-sample Healthy sidb-sample http://10.0.25.54:8181/ords/schema1/_/db-api/stable/ http://10.0.25.54:8181/ords/sql-developer http://10.0.25.54:8181/ords/apex ``` @@ -969,10 +1040,10 @@ $ kubectl describe oraclerestdataservice ords-sample Metadata: ... Spec: ... Status: - Cluster Db API URL: https://ords21c-1.default:8443/ords/ORCLPDB1/_/db-api/stable/ - Database Actions URL: https://10.0.25.54:8443/ords/sql-developer - Database API URL: https://10.0.25.54:8443/ords/ORCLPDB1/_/db-api/stable/ - Apex URL: https://10.0.25.54:8443/ords/ORCLPDB1/apex + Cluster Db API URL: http://ords21c-1.default:8181/ords/schema1/_/db-api/stable/ + Database Actions URL: http://10.0.25.54:8181/ords/sql-developer + Database API URL: http://10.0.25.54:8181/ords/schema1/_/db-api/stable/ + Apex URL: http://10.0.25.54:8181/ords/apex Database Ref: sidb21c-1 Image: Pull From: ... @@ -994,15 +1065,14 @@ The template `.yaml` file for Oracle Rest Data Services (`OracleRestDataService` **Note:** - The `adminPassword` and `ordsPassword` fields in the `oraclerestdataservice.yaml` file contains secrets for authenticating the Single Instance Database and the ORDS user with the following roles: `SQL Administrator, System Administrator, SQL Developer, oracle.dbtools.autorest.any.schema`. -- To build the ORDS image, use the following instructions: [Building Oracle REST Data Services Install Images](https://github.com/oracle/docker-images/tree/main/OracleRestDataServices#building-oracle-rest-data-services-install-images). -- By default, ORDS uses self-signed certificates. 
To use certificates from the Certificate Authority, the ORDS image needs to be rebuilt after specifying the values of `ssl.cert` and `ssl.cert.key` in the [standalone.properties](https://github.com/oracle/docker-images/blob/main/OracleRestDataServices/dockerfiles/standalone.properties.tmpl) file. After you rebuild the ORDS image, use the rebuilt image in the **[config/samples/sidb/oraclerestdataservice.yaml](../../config/samples/sidb/oraclerestdataservice.yaml)** file. -- If you want to install ORDS in a [prebuilt database](#provision-a-pre-built-database), make sure to attach the **database persistence** by uncommenting the `persistence` section in the **[config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml)** file, while provisioning the prebuilt database. + +- If you want to install ORDS in a [prebuilt database](#provision-a-pre-built-database), then ensure that you attach the **database persistence** by uncommenting the `persistence` section in the **[`config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml`](../../config/samples/sidb/singleinstancedatabase_prebuiltdb.yaml)** file, while provisioning the prebuilt database. ### REST Enable a Database #### Provision ORDS -To quickly provision a new ORDS instance, use the sample **[config/samples/sidb/oraclerestdataservice_create.yaml](../../config/samples/sidb/oraclerestdataservice_create.yaml)** file. For example: +To quickly provision a new ORDS instance, use the example **[`config/samples/sidb/oraclerestdataservice_create.yaml`](../../config/samples/sidb/oraclerestdataservice_create.yaml)** file. For example: ```sh $ kubectl apply -f oraclerestdataservice_create.yaml @@ -1012,18 +1082,17 @@ $ kubectl apply -f oraclerestdataservice_create.yaml After this command completes, ORDS is installed in the container database (CDB) of the Single Instance Database. 
##### Note: -You are required to specify the ORDS secret in the [oraclerestdataservice_create.yaml](../../config/samples/sidb/oraclerestdataservice_create.yaml) file. The default value mentioned in the `adminPassword.secretName` field is `ords-secret`. You can create this secret manually by using the following command: +You are required to specify the ORDS Secret in the [`oraclerestdataservice_create.yaml`](../../config/samples/sidb/oraclerestdataservice_create.yaml) file. The default value mentioned in the `adminPassword.secretName` field is `ords-secret`. You can create this Secret manually by using the following command: ```bash kubectl create secret generic ords-secret --from-literal=oracle_pwd= ``` -Alternatively, you can create this secret and the APEX secret by filling the passwords in the **[oraclerestdataservice_secrets.yaml](../../config/samples/sidb/oraclerestdataservice_secrets.yaml)** file and applying it using the command below: +Alternatively, you can create this Secret by filling the passwords in the **[`oraclerestdataservice_secrets.yaml`](../../config/samples/sidb/oraclerestdataservice_secrets.yaml)** file and applying it using the following command: ```bash kubectl apply -f singleinstancedatabase_secrets.yaml ``` -The APEX secret created above, will be used while [installing APEX](#apex-installation). #### Creation Status @@ -1043,7 +1112,7 @@ Clients can access the REST Endpoints using `.status.databaseApiUrl` as shown in ```sh $ kubectl get oraclerestdataservice/ords-sample -o "jsonpath={.status.databaseApiUrl}" - https://10.0.25.54:8443/ords/ORCLPDB1/_/db-api/stable/ + http://10.0.25.54:8181/ords/schema1/_/db-api/stable/ ``` All the REST Endpoints can be found in [_REST APIs for Oracle Database_](https://docs.oracle.com/en/database/oracle/oracle-database/21/dbrst/rest-endpoints.html). @@ -1052,37 +1121,51 @@ There are two basic approaches for authentication to the REST Endpoints. 
Certain #### Database API -To call certain REST endpoints, you must use the ORDS_PUBLIC_USER with role `SQL Administrator`, and `.spec.ordsPassword` credentials. +To call certain REST endpoints, you must use the Schema User, which is REST-Enabled with role `SQL Administrator`, and `.spec.ordsPassword` credentials. -The ORDS user also has the following additional roles: `System Administrator, SQL Developer, oracle.dbtools.autorest.any.schema`. +The Schema user also has the following additional roles: `System Administrator, SQL Developer`. -Use this ORDS user to authenticate the following: +Use this Schema user to authenticate the following: * Database APIs * Any Protected AutoRest Enabled Object APIs * Database Actions of any REST Enabled Schema ##### Examples -Some examples for the Database API usage are as follows: +Some examples for the Database API usage for REST-Enabled schema1 are as follows: - **Get all Database Components** ```sh - curl -s -k -X GET -u 'ORDS_PUBLIC_USER:<.spec.ordsPassword>' https://10.0.25.54:8443/ords/ORCLPDB1/_/db-api/stable/database/components/ | python -m json.tool + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/components/ | python -m json.tool ``` - **Get all Database Users** ```sh - curl -s -k -X GET -u 'ORDS_PUBLIC_USER:<.spec.ordsPassword>' https://10.0.25.54:8443/ords/ORCLPDB1/_/db-api/stable/database/security/users/ | python -m json.tool + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/security/users/ | python -m json.tool ``` - **Get all Tablespaces** ```sh - curl -s -k -X GET -u 'ORDS_PUBLIC_USER:<.spec.ordsPassword>' https://10.0.25.54:8443/ords/ORCLPDB1/_/db-api/stable/database/storage/tablespaces/ | python -m json.tool + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' 
http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/storage/tablespaces/ | python -m json.tool ``` - **Get all Database Parameters** ```sh - curl -s -k -X GET -u 'ORDS_PUBLIC_USER:<.spec.ordsPassword>' https://10.0.25.54:8443/ords/ORCLPDB1/_/db-api/stable/database/parameters/ | python -m json.tool + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/parameters/ | python -m json.tool ``` - **Get all Feature Usage Statistics** ```sh - curl -s -k -X GET -u 'ORDS_PUBLIC_USER:<.spec.ordsPassword>' https://10.0.25.54:8443/ords/ORCLPDB1/_/db-api/stable/database/feature_usage/ | python -m json.tool + curl -s -k -X GET -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' http://10.0.25.54:8181/ords/schema1/_/db-api/stable/database/feature_usage/ | python -m json.tool ``` + +#### MongoDB API + +To enable the Database API for MongoDB, set `.spec.mongoDbApi` to `true`. When this is done, MongoDB applications are be able to connect to Oracle Database using the MongoDB API Access URL. For example: + +```sh +$ kubectl get oraclerestdataservice/ords-sample -o "jsonpath={.status.mongoDbApiAccessUrl}" + + mongodb://[{user}:{password}@]10.0.25.54:27017/{user}?authMechanism=PLAIN&authSource=$external&ssl=true&retryWrites=false&loadBalanced=true +``` + +* Change [{user}:{password}@] to database username and password. Retain the @ symbol but remove all the brackets. +* Change the {user} later in the URL to database username as well. + #### Advanced Usages ##### Oracle Data Pump @@ -1091,10 +1174,9 @@ The Oracle REST Data Services (ORDS) database API enables you to create Oracle D REST APIs for Oracle Data Pump Jobs can be found at [https://docs.oracle.com/en/database/oracle/oracle-database/21/dbrst/op-database-datapump-jobs-post.html](https://docs.oracle.com/en/database/oracle/oracle-database/21/dbrst/op-database-datapump-jobs-post.html). 
##### REST Enabled SQL -The REST Enable SQL functionality is available to all the schemas specified in the `.spec.restEnableSchemas` attribute of the sample yaml. -Only these schemas will have access SQL Developer Web Console specified by the Database Actions URL. +The REST-Enabled SQL functionality is available to all of the schemas specified in the `.spec.restEnableSchemas` attribute of the example yaml in the sample folder. Only these schemas will have access SQL Developer Web Console specified by the Database Actions URL. -The REST Enabled SQL functionality enables REST calls to send DML, DDL and scripts to any REST enabled schema by exposing the same SQL engine used in SQL Developer and Oracle SQLcl (SQL Developer Command Line). +The REST-Enabled SQL functionality enables REST calls to send DML, DDL and scripts to any REST-Enabled schema by exposing the same SQL engine used in SQL Developer and Oracle SQLcl (SQL Developer Command Line). For example: @@ -1119,7 +1201,7 @@ Create a file called "/tmp/table.sql" with the following contents. 
Run the following API to run the script created in the previous example: ```sh - curl -s -k -X "POST" "https://10.0.25.54:8443/ords/<.spec.restEnableSchemas[].pdbName>/<.spec.restEnableSchemas[].urlMapping>/_/sql" \ + curl -s -k -X "POST" "http://10.0.25.54:8181/ords/<.spec.restEnableSchemas[].urlMapping>/_/sql" \ -H "Content-Type: application/sql" \ -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' \ -d @/tmp/table.sql @@ -1130,7 +1212,7 @@ Run the following API to run the script created in the previous example: Fetch all entries from 'DEPT' table by calling the following API ```sh - curl -s -k -X "POST" "https://10.0.25.54:8443/ords/<.spec.restEnableSchemas[].pdbName>/<.spec.restEnableSchemas[].urlMapping>/_/sql" \ + curl -s -k -X "POST" "http://10.0.25.54:8181/ords/<.spec.restEnableSchemas[].urlMapping>/_/sql" \ -H "Content-Type: application/sql" \ -u '<.spec.restEnableSchemas[].schemaName>:<.spec.ordsPassword>' \ -d $'select * from dept;' | python -m json.tool @@ -1152,16 +1234,12 @@ Database Actions can be accessed with a browser by using `.status.databaseAction ```sh $ kubectl get oraclerestdataservice/ords-sample -o "jsonpath={.status.databaseActionsUrl}" - https://10.0.25.54:8443/ords/sql-developer + http://10.0.25.54:8181/ords/sql-developer ``` -To access Database Actions, sign in by using the following code as a database user whose schema has been REST-enabled: - -* First Page: \ -PDB Name: `.spec.restEnableSchemas[].pdbName` \ -Username: `.spec.restEnableSchemas[].urlMapping` +To access Database Actions, sign in by using the following code as a database user whose schema has been REST-Enabled: -* Second Page: \ +* Login Page: \ Username: `.spec.restEnableSchemas[].schemaName` \ Password: `.spec.ordsPassword` @@ -1175,21 +1253,9 @@ Oracle APEX is a low-code development platform that enables developers to build Using APEX, developers can quickly develop and deploy compelling apps that solve real problems and provide immediate value. 
Developers won't need to be an expert in a vast array of technologies to deliver sophisticated solutions. Focus on solving the problem and let APEX take care of the rest. -The `OraOperator` facilitates installation of APEX in the database and also configures ORDS for it. The following section will explain installing APEX with configured ORDS: - -* For quick provisioning, use the sample **[config/samples/sidb/oraclerestdataservice_apex.yaml](../../config/samples/sidb/oraclerestdataservice_apex.yaml)** file. For example: - - kubectl apply -f oraclerestdataservice_apex.yaml - -* The APEX Password is used as a common password for `APEX_PUBLIC_USER, APEX_REST_PUBLIC_USER, APEX_LISTENER` and Apex administrator (username: `ADMIN`) mapped to secretKey. You can create APEX secret using the following command: - - ```bash - kubectl create secret generic apex-secret --from-literal=oracle_pwd= - ``` - Please refer [this](#note) section for APEX secret creation using the **[oraclerestdataservice_secrets.yaml](../../config/samples/sidb/oraclerestdataservice_secrets.yaml)** file. - -* The status of ORDS turns to `Updating` during APEX configuration, and changes to `Healthy` after successful configuration. You can also check status by using the following command: +The `OraOperator` facilitates installation of APEX in the database and also configures ORDS for it. +* Status of APEX configuration can be checked using the following command: ```sh $ kubectl get oraclerestdataservice ords-sample -o "jsonpath={.status.apexConfigured}" @@ -1197,28 +1263,27 @@ The `OraOperator` facilitates installation of APEX in the database and also conf [true] ``` -* If you configure APEX after ORDS is installed, then ORDS pods will be deleted and recreated. - Application Express can be accessed via browser using `.status.apexUrl` in the following command. 
```sh $ kubectl get oraclerestdataservice/ords-sample -o "jsonpath={.status.apexUrl}" - https://10.0.25.54:8443/ords/ORCLPDB1/apex + http://10.0.25.54:8181/ords/apex ``` -Sign in to Administration services using -workspace: `INTERNAL` -username: `ADMIN` -password: `.spec.apexPassword` +Sign in to Administration services using \ +workspace: `INTERNAL` \ +username: `ADMIN` \ +password: `Welcome_1` ![application-express-admin-home](/images/sidb/application-express-admin-home.png) **Note:** +- Oracle strongly recommends that you change the default APEX admin password. - By default, the full development environment is initialized in APEX. After deployment, you can change it manually to the runtime environment. To change environments, run the script `apxdevrm.sql` after connecting to the primary database from the ORDS pod as the `SYS` user with `SYSDBA` privilege. For detailed instructions, see: [Converting a Full Development Environment to a Runtime Environment](https://docs.oracle.com/en/database/oracle/application-express/21.2/htmig/converting-between-runtime-and-full-development-environments.html#GUID-B0621B40-3441-44ED-9D86-29B058E26BE9). 
### Delete ORDS -- To delete ORDS run the following command: +- To delete ORDS, run the following command: kubectl delete oraclerestdataservice ords-sample diff --git a/go.mod b/go.mod index 1f30279f..863f2e99 100644 --- a/go.mod +++ b/go.mod @@ -1,56 +1,56 @@ module github.com/oracle/oracle-database-operator -go 1.21 +go 1.23.3 require ( - github.com/go-logr/logr v1.3.0 - github.com/onsi/ginkgo/v2 v2.13.0 - github.com/onsi/gomega v1.29.0 - github.com/oracle/oci-go-sdk/v65 v65.49.3 - github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.62.0 + github.com/go-logr/logr v1.4.2 + github.com/onsi/ginkgo/v2 v2.20.2 + github.com/onsi/gomega v1.34.2 + github.com/oracle/oci-go-sdk/v65 v65.77.1 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 go.uber.org/zap v1.26.0 - golang.org/x/text v0.14.0 + golang.org/x/text v0.19.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.29.2 - k8s.io/apimachinery v0.29.2 - k8s.io/cli-runtime v0.29.2 - k8s.io/client-go v0.29.2 - k8s.io/kubectl v0.29.2 - sigs.k8s.io/controller-runtime v0.16.2 - sigs.k8s.io/yaml v1.3.0 + k8s.io/api v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/cli-runtime v0.31.3 + k8s.io/client-go v0.31.3 + k8s.io/kubectl v0.31.3 + sigs.k8s.io/controller-runtime v0.19.3 + sigs.k8s.io/yaml v1.4.0 ) require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect + 
github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/camelcase v1.0.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/fvbommel/sortorder v1.1.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect - github.com/go-logr/zapr v1.2.4 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/imdario/mergo v0.3.6 // indirect @@ -59,10 +59,9 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 
github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect @@ -70,37 +69,38 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sony/gobreaker v0.5.0 // indirect - github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.8.0 // indirect + 
golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.24.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apiextensions-apiserver v0.28.0 // indirect - k8s.io/component-base v0.29.2 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/apiextensions-apiserver v0.31.2 // indirect + k8s.io/component-base v0.31.3 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 08185f3e..d23debb8 100644 --- a/go.sum +++ b/go.sum @@ -6,56 +6,56 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew 
v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fvbommel/sortorder v1.1.0 
h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= -github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod 
h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -65,7 +65,6 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -73,9 +72,8 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -86,37 +84,32 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex 
v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -126,16 +119,16 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod 
h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -147,40 +140,40 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/oracle/oci-go-sdk/v65 v65.49.3 h1:HHv+XMZiBYHtoU8Ac/fURdp9v1vJPPCpIbJAWeadREw= -github.com/oracle/oci-go-sdk/v65 v65.49.3/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega 
v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/oracle/oci-go-sdk/v65 v65.77.1 h1:gqjTXIUWvTihkn470AclxSAMcR1JecqjD2IUtp+sDIU= +github.com/oracle/oci-go-sdk/v65 v65.77.1/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.62.0 h1:55138zTXw/yRYizPxZ672I/aDD7Yte3uYRAfUjWUu2M= -github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.62.0/go.mod h1:j51242bf6LQwvJ1JPKWApzTnifmCwcQq0i1p29ylWiM= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2 h1:SyoVBXD/r0PntR1rprb90ClI32FSUNOCWqqTatnipHM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.78.2/go.mod h1:SvsRXw4m1F2vk7HquU5h475bFpke27mIUswfyw9u3ug= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod 
h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= 
+github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -188,91 +181,75 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= 
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -283,9 +260,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= 
+golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -294,8 +270,6 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= @@ -310,14 +284,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -328,35 +301,35 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= -k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= -k8s.io/apiextensions-apiserver v0.28.0 h1:CszgmBL8CizEnj4sj7/PtLGey6Na3YgWyGCPONv7E9E= -k8s.io/apiextensions-apiserver v0.28.0/go.mod h1:uRdYiwIuu0SyqJKriKmqEN2jThIJPhVmOWETm8ud1VE= -k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= 
-k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/cli-runtime v0.29.2 h1:smfsOcT4QujeghsNjECKN3lwyX9AwcFU0nvJ7sFN3ro= -k8s.io/cli-runtime v0.29.2/go.mod h1:KLisYYfoqeNfO+MkTWvpqIyb1wpJmmFJhioA0xd4MW8= -k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= -k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= -k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= -k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kubectl v0.29.2 h1:uaDYaBhumvkwz0S2XHt36fK0v5IdNgL7HyUniwb2IUo= -k8s.io/kubectl v0.29.2/go.mod h1:BhizuYBGcKaHWyq+G7txGw2fXg576QbPrrnQdQDZgqI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU= -sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= +k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/cli-runtime v0.31.3 
h1:fEQD9Xokir78y7pVK/fCJN090/iYNrLHpFbGU4ul9TI= +k8s.io/cli-runtime v0.31.3/go.mod h1:Q2jkyTpl+f6AtodQvgDI8io3jrfr+Z0LyQBPJJ2Btq8= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/component-base v0.31.3 h1:DMCXXVx546Rfvhj+3cOm2EUxhS+EyztH423j+8sOwhQ= +k8s.io/component-base v0.31.3/go.mod h1:xME6BHfUOafRgT0rGVBGl7TuSg8Z9/deT7qq6w7qjIU= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.31.3 h1:3r111pCjPsvnR98oLLxDMwAeM6OPGmPty6gSKaLTQes= +k8s.io/kubectl v0.31.3/go.mod h1:lhMECDCbJN8He12qcKqs2QfmVo9Pue30geovBVpH5fs= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= +sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod 
h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/main.go b/main.go index 4174e97d..ee9992b7 100644 --- a/main.go +++ b/main.go @@ -63,8 +63,12 @@ import ( databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" databasecontroller "github.com/oracle/oracle-database-operator/controllers/database" + dataguardcontroller "github.com/oracle/oracle-database-operator/controllers/dataguard" + databasev4 "github.com/oracle/oracle-database-operator/apis/database/v4" + observabilityv1 "github.com/oracle/oracle-database-operator/apis/observability/v1" observabilityv1alpha1 "github.com/oracle/oracle-database-operator/apis/observability/v1alpha1" + observabilityv4 "github.com/oracle/oracle-database-operator/apis/observability/v4" observabilitycontroller "github.com/oracle/oracle-database-operator/controllers/observability" // +kubebuilder:scaffold:imports ) @@ -79,6 +83,9 @@ func init() { utilruntime.Must(observabilityv1alpha1.AddToScheme(scheme)) utilruntime.Must(monitorv1.AddToScheme(scheme)) utilruntime.Must(databasev1alpha1.AddToScheme(scheme)) + utilruntime.Must(databasev4.AddToScheme(scheme)) + 
utilruntime.Must(observabilityv1.AddToScheme(scheme)) + utilruntime.Must(observabilityv4.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -221,14 +228,22 @@ func main() { setupLog.Error(err, "unable to create webhook", "webhook", "OracleRestDataService") os.Exit(1) } - if err = (&databasev1alpha1.PDB{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&databasev4.PDB{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "PDB") os.Exit(1) } - if err = (&databasev1alpha1.CDB{}).SetupWebhookWithManager(mgr); err != nil { + if err = (&databasev4.LRPDB{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "LRPDB") + os.Exit(1) + } + if err = (&databasev4.CDB{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "CDB") os.Exit(1) } + if err = (&databasev4.LREST{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "LREST") + os.Exit(1) + } if err = (&databasev1alpha1.AutonomousDatabase{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabase") os.Exit(1) @@ -245,17 +260,58 @@ func main() { setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousContainerDatabase") os.Exit(1) } + if err = (&databasev4.AutonomousDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabase") + os.Exit(1) + } + if err = (&databasev4.AutonomousDatabaseBackup{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabaseBackup") + os.Exit(1) + } + if err = (&databasev4.AutonomousDatabaseRestore{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousDatabaseRestore") + os.Exit(1) + } + if err = 
(&databasev4.AutonomousContainerDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AutonomousContainerDatabase") + os.Exit(1) + } if err = (&databasev1alpha1.DataguardBroker{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "DataguardBroker") os.Exit(1) } if err = (&databasev1alpha1.ShardingDatabase{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "ShardingDatabase") + os.Exit(1) + } + if err = (&databasev1alpha1.DbcsSystem{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DbcsSystem") + os.Exit(1) + } + if err = (&databasev4.ShardingDatabase{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ShardingDatabase") } if err = (&observabilityv1alpha1.DatabaseObserver{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "DatabaseObserver") os.Exit(1) } + if err = (&databasev1alpha1.DbcsSystem{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DbcsSystem") + os.Exit(1) + } + if err = (&databasev4.DbcsSystem{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DbcsSystem") + os.Exit(1) + } + if err = (&observabilityv1.DatabaseObserver{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DatabaseObserver") + os.Exit(1) + } + + if err = (&observabilityv4.DatabaseObserver{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DatabaseObserver") + os.Exit(1) + } } // PDB Reconciler @@ -270,6 +326,18 @@ func main() { os.Exit(1) } + // LRPDBR Reconciler + if err = (&databasecontroller.LRPDBReconciler{ + Client: 
mgr.GetClient(), + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("LRPDB"), + Interval: time.Duration(i), + Recorder: mgr.GetEventRecorderFor("LRPDB"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "LRPDB") + os.Exit(1) + } + // CDB Reconciler if err = (&databasecontroller.CDBReconciler{ Client: mgr.GetClient(), @@ -282,9 +350,23 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "CDB") os.Exit(1) } - if err = (&databasecontroller.DataguardBrokerReconciler{ + + // LREST Reconciler + if err = (&databasecontroller.LRESTReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Config: mgr.GetConfig(), + Log: ctrl.Log.WithName("controllers").WithName("LREST"), + Interval: time.Duration(i), + Recorder: mgr.GetEventRecorderFor("LREST"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "LREST") + os.Exit(1) + } + + if err = (&dataguardcontroller.DataguardBrokerReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("database").WithName("DataguardBroker"), + Log: ctrl.Log.WithName("controllers").WithName("dataguard").WithName("DataguardBroker"), Scheme: mgr.GetScheme(), Config: mgr.GetConfig(), Recorder: mgr.GetEventRecorderFor("DataguardBroker"), @@ -293,6 +375,15 @@ func main() { os.Exit(1) } + if err = (&databasecontroller.OrdsSrvsReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + // Config: mgr.GetConfig(), + Recorder: mgr.GetEventRecorderFor("OrdsSrvs"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OrdsSrvs") + } + // Observability DatabaseObserver Reconciler if err = (&observabilitycontroller.DatabaseObserverReconciler{ Client: mgr.GetClient(), @@ -304,17 +395,37 @@ func main() { os.Exit(1) } + if err = (&databasev4.SingleInstanceDatabase{}).SetupWebhookWithManager(mgr); 
err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "SingleInstanceDatabase") + os.Exit(1) + } + if err = (&databasev4.DataguardBroker{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "DataguardBroker") + os.Exit(1) + } + if err = (&databasev4.OracleRestDataService{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "OracleRestDataService") + os.Exit(1) + } // +kubebuilder:scaffold:builder // Add index for PDB CR to enable mgr to cache PDBs indexFunc := func(obj client.Object) []string { - return []string{obj.(*databasev1alpha1.PDB).Spec.PDBName} + return []string{obj.(*databasev4.PDB).Spec.PDBName} } - if err = cache.IndexField(context.TODO(), &databasev1alpha1.PDB{}, "spec.pdbName", indexFunc); err != nil { + if err = cache.IndexField(context.TODO(), &databasev4.PDB{}, "spec.pdbName", indexFunc); err != nil { setupLog.Error(err, "unable to create index function for ", "controller", "PDB") os.Exit(1) } + indexFunc2 := func(obj client.Object) []string { + return []string{obj.(*databasev4.LRPDB).Spec.LRPDBName} + } + if err = cache.IndexField(context.TODO(), &databasev4.LRPDB{}, "spec.pdbName", indexFunc2); err != nil { + setupLog.Error(err, "unable to create index function for ", "controller", "LRPDB") + os.Exit(1) + } + setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") diff --git a/oracle-database-operator.yaml b/oracle-database-operator.yaml index 504fc7cd..70147329 100644 --- a/oracle-database-operator.yaml +++ b/oracle-database-operator.yaml @@ -9,10 +9,22 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + 
controller-gen.kubebuilder.io/version: v0.16.5 name: autonomouscontainerdatabases.database.oracle.com spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 group: database.oracle.com names: kind: AutonomousContainerDatabase @@ -37,18 +49,14 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousContainerDatabase is the Schema for the autonomouscontainerdatabases API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: AutonomousContainerDatabaseSpec defines the desired state of AutonomousContainerDatabase properties: action: enum: @@ -57,7 +65,6 @@ spec: - TERMINATE type: string autonomousContainerDatabaseOCID: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file' type: string autonomousExadataVMClusterOCID: type: string @@ -73,7 +80,6 @@ spec: default: false type: boolean ociConfig: - description: "*********************** *\tOCI config ***********************" properties: configMapName: type: string @@ -81,17 +87,84 @@ spec: type: string type: object patchModel: - description: 'AutonomousContainerDatabasePatchModelEnum Enum with underlying type: string' enum: - RELEASE_UPDATES - RELEASE_UPDATE_REVISIONS type: string type: object status: - description: AutonomousContainerDatabaseStatus defines the observed state of AutonomousContainerDatabase properties: lifecycleState: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file' + type: string + timeCreated: + type: string + required: + - lifecycleState + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: DisplayName + type: string + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.timeCreated + name: Created + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - SYNC + - RESTART + - TERMINATE + type: string + autonomousContainerDatabaseOCID: + type: string + autonomousExadataVMClusterOCID: + type: string + 
compartmentOCID: + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + patchModel: + enum: + - RELEASE_UPDATES + - RELEASE_UPDATE_REVISIONS + type: string + type: object + status: + properties: + lifecycleState: type: string timeCreated: type: string @@ -103,21 +176,27 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 name: autonomousdatabasebackups.database.oracle.com spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 group: database.oracle.com names: kind: AutonomousDatabaseBackup @@ -148,18 +227,14 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousDatabaseBackup is the Schema for the autonomousdatabasebackups API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: AutonomousDatabaseBackupSpec defines the desired state of AutonomousDatabaseBackup properties: autonomousDatabaseBackupOCID: type: string @@ -168,7 +243,6 @@ spec: isLongTermBackup: type: boolean ociConfig: - description: "*********************** *\tOCI config ***********************" properties: configMapName: type: string @@ -178,10 +252,8 @@ spec: retentionPeriodInDays: type: integer target: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file' properties: k8sADB: - description: "*********************** *\tADB spec ***********************" properties: name: type: string @@ -194,7 +266,6 @@ spec: type: object type: object status: - description: AutonomousDatabaseBackupStatus defines the observed state of AutonomousDatabaseBackup properties: autonomousDatabaseOCID: type: string @@ -207,14 +278,103 @@ spec: isAutomatic: type: boolean lifecycleState: - description: 'AutonomousDatabaseBackupLifecycleStateEnum Enum with underlying type: string' type: string timeEnded: type: string timeStarted: type: string type: - description: 'AutonomousDatabaseBackupTypeEnum Enum with underlying type: string' + type: string + required: + - autonomousDatabaseOCID + - compartmentOCID + - dbDisplayName + - dbName + - isAutomatic + - lifecycleState + - type + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.lifecycleState + name: State + type: string + - jsonPath: .status.dbDisplayName + name: DB DisplayName + type: string + - jsonPath: .status.type + name: Type + type: string + - jsonPath: .status.timeStarted + name: Started + type: string + - jsonPath: .status.timeEnded + name: Ended + type: string + name: v4 + schema: + 
openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + autonomousDatabaseBackupOCID: + type: string + displayName: + type: string + isLongTermBackup: + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + retentionPeriodInDays: + type: integer + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: + properties: + ocid: + type: string + type: object + type: object + type: object + status: + properties: + autonomousDatabaseOCID: + type: string + compartmentOCID: + type: string + dbDisplayName: + type: string + dbName: + type: string + isAutomatic: + type: boolean + lifecycleState: + type: string + timeEnded: + type: string + timeStarted: + type: string + type: type: string required: - autonomousDatabaseOCID @@ -230,21 +390,27 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 name: autonomousdatabaserestores.database.oracle.com spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 group: database.oracle.com names: kind: AutonomousDatabaseRestore @@ -269,21 +435,16 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousDatabaseRestore is the Schema for the autonomousdatabaserestores API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this 
representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: AutonomousDatabaseRestoreSpec defines the desired state of AutonomousDatabaseRestore properties: ociConfig: - description: "*********************** *\tOCI config ***********************" properties: configMapName: type: string @@ -293,7 +454,6 @@ spec: source: properties: k8sADBBackup: - description: 'EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.' 
properties: name: type: string @@ -301,15 +461,12 @@ spec: pointInTime: properties: timestamp: - description: 'The timestamp must follow this format: YYYY-MM-DD HH:MM:SS GMT' type: string type: object type: object target: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file' properties: k8sADB: - description: "*********************** *\tADB spec ***********************" properties: name: type: string @@ -325,15 +482,98 @@ spec: - target type: object status: - description: AutonomousDatabaseRestoreStatus defines the observed state of AutonomousDatabaseRestore properties: dbName: type: string displayName: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file' type: string status: - description: 'WorkRequestStatusEnum Enum with underlying type: string' + type: string + timeAccepted: + type: string + timeEnded: + type: string + timeStarted: + type: string + workRequestOCID: + type: string + required: + - dbName + - displayName + - status + - workRequestOCID + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.displayName + name: DbDisplayName + type: string + - jsonPath: .status.dbName + name: DbName + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + source: + properties: + k8sADBBackup: + properties: + name: + type: string + type: object + pointInTime: + properties: + timestamp: + type: string + type: object + type: object + target: + properties: + k8sADB: + properties: + name: + type: string + type: object + ociADB: + 
properties: + ocid: + type: string + type: object + type: object + required: + - source + - target + type: object + status: + properties: + dbName: + type: string + displayName: + type: string + status: type: string timeAccepted: type: string @@ -354,21 +594,27 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 name: autonomousdatabases.database.oracle.com spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /convert + conversionReviewVersions: + - v1alpha1 + - v1 + - v4 group: database.oracle.com names: kind: AutonomousDatabase @@ -408,54 +654,67 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: AutonomousDatabase is the Schema for the autonomousdatabases API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: 'AutonomousDatabaseSpec defines the desired state of AutonomousDatabase Important: Run "make" to regenerate code after modifying this file' properties: - details: - description: AutonomousDatabaseDetails defines the detail information of AutonomousDatabase, corresponding to oci-go-sdk/database/AutonomousDatabase + action: + enum: + - "" + - Create + - Sync + - Update + - Stop + - Start + - Terminate + - Clone + type: string + clone: properties: adminPassword: properties: k8sSecret: - description: "*********************** *\tSecret specs ***********************" properties: name: type: string type: object ociSecret: properties: - ocid: + id: type: string type: object type: object autonomousContainerDatabase: - description: ACDSpec defines the spec of the target for backup/restore runs. The name could be the name of an AutonomousDatabase or an AutonomousDatabaseBackup properties: - k8sACD: - description: "*********************** *\tACD specs ***********************" + k8sAcd: properties: name: type: string type: object - ociACD: + ociAcd: properties: - ocid: + id: type: string type: object type: object - autonomousDatabaseOCID: + cloneType: + enum: + - FULL + - METADATA type: string - compartmentOCID: + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU type: string cpuCoreCount: type: integer @@ -466,7 +725,6 @@ spec: dbVersion: type: string dbWorkload: - description: 'AutonomousDatabaseDbWorkloadEnum Enum with underlying type: string' enum: - OLTP - DW @@ -479,84 +737,158 @@ spec: additionalProperties: type: string type: object + isAccessControlEnabled: + type: boolean isAutoScalingEnabled: type: boolean isDedicated: type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean licenseModel: - description: 
'AutonomousDatabaseLicenseModelEnum Enum with underlying type: string' enum: - LICENSE_INCLUDED - BRING_YOUR_OWN_LICENSE type: string - lifecycleState: - description: 'AutonomousDatabaseLifecycleStateEnum Enum with underlying type: string' + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: type: string - networkAccess: + whitelistedIps: + items: + type: string + type: array + type: object + details: + properties: + adminPassword: properties: - accessControlList: - items: - type: string - type: array - accessType: - enum: - - "" - - PUBLIC - - RESTRICTED - - PRIVATE - type: string - isAccessControlEnabled: - type: boolean - isMTLSConnectionRequired: - type: boolean - privateEndpoint: + k8sSecret: properties: - hostnamePrefix: + name: type: string - nsgOCIDs: - items: - type: string - type: array - subnetOCID: + type: object + ociSecret: + properties: + id: type: string type: object type: object - wallet: + autonomousContainerDatabase: properties: - name: - type: string - password: + k8sAcd: properties: - k8sSecret: - description: "*********************** *\tSecret specs ***********************" - properties: - name: - type: string - type: object - ociSecret: - properties: - ocid: - type: string - type: object + name: + type: string + type: object + ociAcd: + properties: + id: + type: string type: object type: object + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string + type: object + id: + type: string + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + 
isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array type: object hardLink: default: false type: boolean ociConfig: - description: "*********************** *\tOCI config ***********************" properties: configMapName: type: string secretName: type: string type: object + wallet: + properties: + name: + type: string + password: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + type: object required: - - details + - action type: object status: - description: AutonomousDatabaseStatus defines the observed state of AutonomousDatabase properties: allConnectionStrings: items: @@ -578,36 +910,29 @@ spec: type: array conditions: items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. \t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -623,7 +948,6 @@ spec: - type x-kubernetes-list-type: map lifecycleState: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file' type: string timeCreated: type: string @@ -632,433 +956,316 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert - controller-gen.kubebuilder.io/version: v0.6.1 - name: cdbs.database.oracle.com -spec: - group: database.oracle.com - names: - kind: CDB - listKind: CDBList - plural: cdbs - singular: cdb - scope: Namespaced - versions: - additionalPrinterColumns: - - description: Name of the CDB - jsonPath: .spec.cdbName - name: CDB Name + - jsonPath: .spec.details.displayName + name: Display Name type: string - - description: ' Name of the DB Server' - jsonPath: .spec.dbServer - name: DB Server + - jsonPath: .spec.details.dbName + name: Db Name type: string - - description: DB server port - jsonPath: .spec.dbPort - name: DB Port - type: integer - - description: ' string of the tnsalias' - jsonPath: .spec.dbTnsurl - name: TNS STRING + - jsonPath: .status.lifecycleState + name: State type: string - - description: Replicas - jsonPath: .spec.replicas - name: Replicas + - jsonPath: .spec.details.isDedicated + name: Dedicated + type: string + - jsonPath: .spec.details.cpuCoreCount + name: OCPUs type: integer - - description: Status of the CDB Resource - jsonPath: .status.phase - name: Status + - 
jsonPath: .spec.details.dataStorageSizeInTBs + name: Storage (TB) + type: integer + - jsonPath: .spec.details.dbWorkload + name: Workload Type type: string - - description: Error message, if any - jsonPath: .status.msg - name: Message + - jsonPath: .status.timeCreated + name: Created type: string - name: v1alpha1 + name: v4 schema: openAPIV3Schema: - description: CDB is the Schema for the cdbs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: CDBSpec defines the desired state of CDB properties: - cdbAdminPwd: - description: Password for the CDB Administrator to manage PDB lifecycle - properties: - secret: - description: CDBSecret defines the secretName - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - cdbAdminUser: - description: User in the root container with sysdba priviledges to manage PDB lifecycle - properties: - secret: - description: CDBSecret defines the secretName - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - cdbName: - description: Name of the CDB + action: + enum: + - "" + - Create + - Sync + - Update + - Stop + - Start + - Terminate + - Clone type: string - cdbTlsCrt: + clone: 
properties: - secret: - description: CDBSecret defines the secretName + adminPassword: properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object type: object - required: - - secret - type: object - cdbTlsKey: - properties: - secret: - description: CDBSecret defines the secretName + autonomousContainerDatabase: properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName + k8sAcd: + properties: + name: + type: string + type: object + ociAcd: + properties: + id: + type: string + type: object type: object - required: - - secret - type: object - dbPort: - description: DB server port - type: integer - dbServer: - description: Name of the DB server - type: string - dbTnsurl: - type: string - nodeSelector: - additionalProperties: - type: string - description: Node Selector for running the Pod - type: object - ordsImage: - description: ORDS Image Name - type: string - ordsImagePullPolicy: - description: ORDS Image Pull Policy - enum: - - Always - - Never - type: string - ordsImagePullSecret: - description: The name of the image pull secret in case of a private docker repository. - type: string - ordsPort: - description: ORDS server port. For now, keep it as 8888. TO BE USED IN FUTURE RELEASE. 
- type: integer - ordsPwd: - description: Password for user ORDS_PUBLIC_USER - properties: - secret: - description: CDBSecret defines the secretName - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - replicas: - description: Number of ORDS Containers to create - type: integer - serviceName: - description: Name of the CDB Service - type: string - sysAdminPwd: - description: Password for the CDB System Administrator - properties: - secret: - description: CDBSecret defines the secretName - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - webServerPwd: - description: Password for the Web Server User - properties: - secret: - description: CDBSecret defines the secretName - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - webServerUser: - description: Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints - properties: - secret: - description: CDBSecret defines the secretName - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName + cloneType: + enum: + - FULL + - METADATA + type: string + compartmentId: + type: string + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU + type: string + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: + type: string + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: + additionalProperties: + type: string type: object - required: - - secret + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + 
isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array type: object - type: object - status: - description: CDBStatus defines the observed state of CDB - properties: - msg: - description: Message - type: string - phase: - description: Phase of the CDB Resource - type: string - status: - description: CDB Resource Status - type: boolean - required: - - phase - - status - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null - name: databaseobservers.observability.oracle.com -spec: - group: observability.oracle.com - names: - kind: DatabaseObserver - listKind: DatabaseObserverList - plural: databaseobservers - singular: databaseobserver - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.exporterConfig - name: ExporterConfig - type: string - - jsonPath: .status.status - name: Status - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: DatabaseObserver is the Schema for the databaseobservers API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: DatabaseObserverSpec defines the desired state of DatabaseObserver - properties: - database: - description: DatabaseObserverDatabase defines the database details used for DatabaseObserver + details: properties: - dbConnectionString: - properties: - key: - type: string - secret: - type: string - type: object - dbPassword: - properties: - key: - type: string - secret: - type: string - vaultOCID: - type: string - vaultSecretName: - type: string - type: object - dbUser: - properties: - key: - type: string - secret: - type: string - type: object - dbWallet: + adminPassword: properties: - key: - type: string - secret: - type: string + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object type: object - type: object - exporter: - description: DatabaseObserverExporterConfig defines the configuration details related to the exporters of DatabaseObserver - properties: - configuration: + autonomousContainerDatabase: properties: - configmap: - description: ConfigMapDetails defines the configmap name + k8sAcd: properties: - configmapName: + name: type: string - key: + type: object + ociAcd: + properties: + id: type: string type: object type: object - image: + compartmentId: type: string - service: - description: DatabaseObserverService defines the exporter service component of DatabaseObserver - properties: - port: - format: int32 - type: integer - type: object - type: object - ociConfig: - properties: - configMapName: + computeCount: + type: number + computeModel: + enum: + - ECPU + - OCPU type: string - secretName: + cpuCoreCount: + type: integer + dataStorageSizeInTBs: + type: integer + dbName: type: string - type: object - 
prometheus: - description: PrometheusConfig defines the generated resources for Prometheus - properties: - labels: + dbVersion: + type: string + dbWorkload: + enum: + - OLTP + - DW + - AJD + - APEX + type: string + displayName: + type: string + freeformTags: additionalProperties: type: string type: object - port: + id: type: string - type: object - replicas: - format: int32 - type: integer - type: object - status: - description: DatabaseObserverStatus defines the observed state of DatabaseObserver - properties: - conditions: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file' - items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. \t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}" + isAccessControlEnabled: + type: boolean + isAutoScalingEnabled: + type: boolean + isDedicated: + type: boolean + isFreeTier: + type: boolean + isMtlsConnectionRequired: + type: boolean + licenseModel: + enum: + - LICENSE_INCLUDED + - BRING_YOUR_OWN_LICENSE + type: string + nsgIds: + items: + type: string + type: array + ocpuCount: + type: number + privateEndpointLabel: + type: string + subnetId: + type: string + whitelistedIps: + items: + type: string + type: array + type: object + hardLink: + default: false + type: boolean + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + wallet: + 
properties: + name: + type: string + password: + properties: + k8sSecret: + properties: + name: + type: string + type: object + ociSecret: + properties: + id: + type: string + type: object + type: object + type: object + required: + - action + type: object + status: + properties: + allConnectionStrings: + items: + properties: + connectionStrings: + items: + properties: + connectionString: + type: string + tnsName: + type: string + type: object + type: array + tlsAuthentication: + type: string + required: + - connectionStrings + type: object + type: array + conditions: + items: properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. 
enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -1070,970 +1277,10602 @@ spec: - type type: object type: array - exporterConfig: + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lifecycleState: type: string - replicas: - type: integer - status: + timeCreated: + type: string + walletExpiringDate: type: string - required: - - conditions - - exporterConfig type: object type: object served: true storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null - name: dataguardbrokers.database.oracle.com + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: cdbs.database.oracle.com spec: group: database.oracle.com names: - kind: DataguardBroker - listKind: DataguardBrokerList - plural: dataguardbrokers - singular: dataguardbroker + kind: CDB + listKind: CDBList + plural: cdbs + singular: cdb scope: Namespaced versions: - additionalPrinterColumns: - - jsonPath: .status.primaryDatabase - name: Primary - type: string - - jsonPath: .status.standbyDatabases - name: Standbys - type: string - - jsonPath: .spec.protectionMode - name: Protection Mode + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name type: 
string - - jsonPath: .status.clusterConnectString - name: Cluster Connect Str - priority: 1 + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server type: string - - jsonPath: .status.externalConnectString - name: Connect Str + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the CDB Resource + jsonPath: .status.phase + name: Status type: string - - jsonPath: .spec.primaryDatabaseRef - name: Primary Database - priority: 1 + - description: Error message, if any + jsonPath: .status.msg + name: Message type: string - - jsonPath: .status.status - name: Status + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING type: string - name: v1alpha1 + name: v4 schema: openAPIV3Schema: - description: DataguardBroker is the Schema for the dataguardbrokers API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: DataguardBrokerSpec defines the desired state of DataguardBroker properties: - fastStartFailOver: + cdbAdminPwd: properties: - enable: - type: boolean - strategy: - items: - description: FSFO strategy - properties: - sourceDatabaseRef: - type: string - targetDatabaseRefs: - type: string - type: object - type: array + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret type: object - loadBalancer: + cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: type: boolean nodeSelector: additionalProperties: type: string type: object - primaryDatabaseRef: + ordsImage: type: string - protectionMode: + ordsImagePullPolicy: enum: - - MaxPerformance - - MaxAvailability + - Always + - Never type: string - 
serviceAnnotations: - additionalProperties: - type: string - type: object - setAsPrimaryDatabase: + ordsImagePullSecret: type: string - standbyDatabaseRefs: - items: - type: string - type: array - required: - - primaryDatabaseRef - - protectionMode - - standbyDatabaseRefs + ordsPort: + type: integer + ordsPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object type: object status: - description: DataguardBrokerStatus defines the observed state of DataguardBroker properties: - clusterConnectString: - type: string - externalConnectString: - type: string - primaryDatabase: - type: string - primaryDatabaseRef: - type: string - protectionMode: + msg: type: string - standbyDatabases: + phase: type: string status: - type: string + type: boolean + required: + - phase + - status type: object type: object served: true storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null - name: dbcssystems.database.oracle.com + controller-gen.kubebuilder.io/version: v0.16.5 + name: 
databaseobservers.observability.oracle.com spec: - group: database.oracle.com + group: observability.oracle.com names: - kind: DbcsSystem - listKind: DbcsSystemList - plural: dbcssystems - singular: dbcssystem + kind: DatabaseObserver + listKind: DatabaseObserverList + plural: databaseobservers + shortNames: + - dbobserver + - dbobservers + singular: databaseobserver scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v1 schema: openAPIV3Schema: - description: DbcsSystem is the Schema for the dbcssystems API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: DbcsSystemSpec defines the desired state of DbcsSystem properties: - dbSystem: + configuration: properties: - availabilityDomain: - type: string - backupSubnetId: - type: string - clusterName: - type: string - compartmentId: - type: string - cpuCoreCount: - type: integer - dbAdminPaswordSecret: - type: string - dbBackupConfig: - description: DB Backup COnfig Network Struct + configMap: properties: - autoBackupEnabled: - type: boolean - autoBackupWindow: + key: type: string - backupDestinationDetails: + name: type: string - recoveryWindowsInDays: - type: integer type: object - dbDomain: - type: string - dbEdition: - type: string - dbName: + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: 
boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + 
properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: type: string - dbUniqueName: + path: type: string - dbVersion: + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: type: string - dbWorkload: + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + 
honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + 
configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: 
^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + 
replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: 
+ type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + 
requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + 
path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + 
x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string 
+ user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + 
type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + 
properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + 
x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + 
port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string 
+ type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string 
+ mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + 
dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + 
appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + 
authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - 
key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: ^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + 
maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: 
object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: + type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + 
x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + 
targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: 
string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + 
storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: 
object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: 
object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + 
type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: 
object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + 
port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: 
^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.exporterConfig + name: ExporterConfig + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.version + name: Version + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + configMap: + properties: + key: + type: string + name: + type: string + type: object + type: object + database: + properties: + dbConnectionString: + properties: + key: + type: string + secret: + type: string + type: object + dbPassword: + properties: + key: + type: string + secret: + type: string + vaultOCID: + type: string + vaultSecretName: + type: string + type: object + dbUser: + properties: + key: + type: string + secret: + type: string + type: object + dbWallet: + properties: + key: + type: string + secret: + type: string + type: object + type: object + exporter: + properties: + deployment: + properties: + args: + items: + type: string + type: array + commands: + items: + type: string + type: array + env: + additionalProperties: + type: string + type: object + image: + type: string + labels: + additionalProperties: + type: string + type: object + podTemplate: + properties: + labels: + additionalProperties: + type: string + type: object + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + 
required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + 
type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + type: object + type: object + inheritLabels: + items: + type: string + type: array + log: + properties: + filename: + type: string + path: + type: string + volume: + properties: + name: + type: string + persistentVolumeClaim: + properties: + claimName: + type: string + type: object + type: object + type: object + ociConfig: + properties: + configMapName: + type: string + secretName: + type: string + type: object + prometheus: + properties: + serviceMonitor: + properties: + endpoints: + items: + properties: + authorization: + properties: + credentials: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + type: string + type: object + basicAuth: + properties: + password: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + type: string + bearerTokenSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + 
enableHttp2: + type: boolean + filterRunning: + type: boolean + followRedirects: + type: boolean + honorLabels: + type: boolean + honorTimestamps: + type: boolean + interval: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + oauth2: + properties: + clientId: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + type: object + noProxy: + type: string + proxyConnectHeader: + additionalProperties: + items: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: array + type: object + x-kubernetes-map-type: atomic + proxyFromEnvironment: + type: boolean + proxyUrl: + pattern: 
^http(s)?://.+$ + type: string + scopes: + items: + type: string + type: array + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + type: boolean + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + tokenUrl: + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + type: object + path: + type: string + port: + type: string + proxyUrl: + type: string + relabelings: + items: + properties: + action: + default: replace + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + format: int64 + type: integer + 
regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + type: string + type: object + type: array + scheme: + enum: + - http + - https + type: string + scrapeTimeout: + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + tlsConfig: + properties: + ca: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + type: string + cert: + properties: + configMap: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + type: string + insecureSkipVerify: + type: boolean + keyFile: + type: string + keySecret: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + minVersion: + enum: + - TLS10 + - TLS11 + - TLS12 + - TLS13 + type: string + serverName: + type: string + type: object + trackTimestampsStaleness: + type: boolean + type: object + type: array + labels: + additionalProperties: + type: string + type: object + namespaceSelector: + properties: + any: 
+ type: boolean + matchNames: + items: + type: string + type: array + type: object + type: object + type: object + replicas: + format: int32 + type: integer + sidecarVolumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + 
type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + 
type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - 
operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + 
x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + 
volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + sidecars: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + 
command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: 
+ host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + 
type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + 
runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + 
type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + exporterConfig: + type: string + replicas: + type: integer + status: + type: string + version: + type: string + required: + - conditions + - exporterConfig + - version + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: dataguardbrokers.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DataguardBroker + listKind: DataguardBrokerList + plural: dataguardbrokers + singular: dataguardbroker + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.primaryDatabase + name: Primary + type: string + 
- jsonPath: .status.standbyDatabases + name: Standbys + type: string + - jsonPath: .spec.protectionMode + name: Protection Mode + type: string + - jsonPath: .status.clusterConnectString + name: Cluster Connect Str + priority: 1 + type: string + - jsonPath: .status.externalConnectString + name: Connect Str + type: string + - jsonPath: .spec.primaryDatabaseRef + name: Primary Database + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.fastStartFailover + name: FSFO + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + fastStartFailover: + type: boolean + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + primaryDatabaseRef: + type: string + protectionMode: + enum: + - MaxPerformance + - MaxAvailability + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + setAsPrimaryDatabase: + type: string + standbyDatabaseRefs: + items: + type: string + type: array + required: + - primaryDatabaseRef + - protectionMode + - standbyDatabaseRefs + type: object + status: + properties: + clusterConnectString: + type: string + databasesInDataguardConfig: + additionalProperties: + type: string + type: object + externalConnectString: + type: string + fastStartFailover: + type: boolean + primaryDatabase: + type: string + primaryDatabaseRef: + type: string + protectionMode: + type: string + standbyDatabases: + type: string + status: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.primaryDatabase + name: Primary + type: string + - jsonPath: .status.standbyDatabases + name: Standbys + type: string + - jsonPath: .spec.protectionMode + name: Protection Mode + type: string + - jsonPath: .status.clusterConnectString + 
name: Cluster Connect Str + priority: 1 + type: string + - jsonPath: .status.externalConnectString + name: Connect Str + type: string + - jsonPath: .spec.primaryDatabaseRef + name: Primary Database + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.fastStartFailover + name: FSFO + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + fastStartFailover: + type: boolean + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + primaryDatabaseRef: + type: string + protectionMode: + enum: + - MaxPerformance + - MaxAvailability + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + setAsPrimaryDatabase: + type: string + standbyDatabaseRefs: + items: + type: string + type: array + required: + - primaryDatabaseRef + - protectionMode + - standbyDatabaseRefs + type: object + status: + properties: + clusterConnectString: + type: string + databasesInDataguardConfig: + additionalProperties: + type: string + type: object + externalConnectString: + type: string + fastStartFailover: + type: boolean + primaryDatabase: + type: string + primaryDatabaseRef: + type: string + protectionMode: + type: string + standbyDatabases: + type: string + status: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: dbcssystems.database.oracle.com +spec: + group: database.oracle.com + names: + kind: DbcsSystem + listKind: DbcsSystemList + plural: dbcssystems + singular: dbcssystem + scope: Namespaced + versions: + - name: v1alpha1 + 
schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - displayName + - hostName + - subnetId + type: object + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: integer + dbAdminPaswordSecret: + type: string + dbBackupConfig: + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: + type: string + diskRedundancy: + type: string + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: 
string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPaswordSecret + - hostName + - shape + - subnetId + type: object + hardLink: + type: boolean + id: + type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + ociConfigMap: + type: string + ociSecret: + type: string + pdbConfigs: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + isDelete: + type: boolean + pdbAdminPassword: + type: string + pdbName: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + tdeWalletPassword: + type: string + required: + - freeformTags + - pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean + required: + - ociConfigMap + type: object + status: + properties: + availabilityDomain: + type: string + cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbCloneStatus: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object + dbEdition: + type: string + dbInfo: + items: + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: 
+ type: string + id: + type: string + kmsDetailsStatus: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: + type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + pdbDetailsStatus: + items: + properties: + pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + required: + - freeformTags + - pdbName + - shouldPdbAdminAccountBeLocked + type: object + type: array + type: object + type: array + recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: false + subresources: + status: {} + - name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + databaseId: + type: string + dbBackupId: + type: string + dbClone: + properties: + dbAdminPasswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + 
type: string + domain: + type: string + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsKeyId: + type: string + kmsKeyVersionId: + type: string + licenseModel: + type: string + privateIp: + type: string + sidPrefix: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + tdeWalletPasswordSecret: + type: string + required: + - dbDbUniqueName + - dbName + - displayName + - hostName + - subnetId + type: object + dbSystem: + properties: + availabilityDomain: + type: string + backupSubnetId: + type: string + clusterName: + type: string + compartmentId: + type: string + cpuCoreCount: + type: integer + dbAdminPasswordSecret: + type: string + dbBackupConfig: + properties: + autoBackupEnabled: + type: boolean + autoBackupWindow: + type: string + backupDestinationDetails: + type: string + recoveryWindowsInDays: + type: integer + type: object + dbDomain: + type: string + dbEdition: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbVersion: + type: string + dbWorkload: type: string diskRedundancy: type: string - displayName: + displayName: + type: string + domain: + type: string + faultDomains: + items: + type: string + type: array + hostName: + type: string + initialDataStorageSizeInGB: + type: integer + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + licenseModel: + type: string + nodeCount: + type: integer + pdbName: + type: string + privateIp: + type: string + shape: + type: string + sshPublicKeys: + items: + type: string + type: array + storageManagement: + type: string + subnetId: + type: string + tags: + additionalProperties: + type: string + type: object + tdeWalletPasswordSecret: + type: string + timeZone: + type: string + required: + - availabilityDomain + - compartmentId + - dbAdminPasswordSecret + - hostName + - shape + - subnetId + 
type: object + hardLink: + type: boolean + id: + type: string + kmsConfig: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyName: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + ociConfigMap: + type: string + ociSecret: + type: string + pdbConfigs: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + isDelete: + type: boolean + pdbAdminPassword: + type: string + pdbName: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + tdeWalletPassword: + type: string + required: + - freeformTags + - pdbAdminPassword + - pdbName + - shouldPdbAdminAccountBeLocked + - tdeWalletPassword + type: object + type: array + setupDBCloning: + type: boolean + required: + - ociConfigMap + type: object + status: + properties: + availabilityDomain: + type: string + cpuCoreCount: + type: integer + dataStoragePercentage: + type: integer + dataStorageSizeInGBs: + type: integer + dbCloneStatus: + properties: + dbAdminPaswordSecret: + type: string + dbDbUniqueName: + type: string + dbName: + type: string + displayName: + type: string + domain: + type: string + hostName: + type: string + id: + type: string + licenseModel: + type: string + sshPublicKeys: + items: + type: string + type: array + subnetId: + type: string + required: + - dbDbUniqueName + - hostName + type: object + dbEdition: + type: string + dbInfo: + items: + properties: + dbHomeId: + type: string + dbName: + type: string + dbUniqueName: + type: string + dbWorkload: + type: string + id: + type: string + type: object + type: array + displayName: + type: string + id: + type: string + kmsDetailsStatus: + properties: + compartmentId: + type: string + encryptionAlgo: + type: string + keyId: + type: string + keyName: + type: string + managementEndpoint: + type: string + vaultId: + type: string + vaultName: + type: string + vaultType: + type: string + type: object + 
licenseModel: + type: string + network: + properties: + clientSubnet: + type: string + domainName: + type: string + hostName: + type: string + listenerPort: + type: integer + networkSG: + type: string + scanDnsName: + type: string + vcnName: + type: string + type: object + nodeCount: + type: integer + pdbDetailsStatus: + items: + properties: + pdbConfigStatus: + items: + properties: + freeformTags: + additionalProperties: + type: string + type: object + pdbName: + type: string + pdbState: + type: string + pluggableDatabaseId: + type: string + shouldPdbAdminAccountBeLocked: + type: boolean + type: object + type: array + type: object + type: array + recoStorageSizeInGB: + type: integer + shape: + type: string + state: + type: string + storageManagement: + type: string + subnetId: + type: string + timeZone: + type: string + workRequests: + items: + properties: + operationId: + type: string + operationType: + type: string + percentComplete: + type: string + timeAccepted: + type: string + timeFinished: + type: string + timeStarted: + type: string + required: + - operationId + - operationType + type: object + type: array + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: lrests.database.oracle.com +spec: + group: database.oracle.com + names: + kind: LREST + listKind: LRESTList + plural: lrests + singular: lrest + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the LREST + jsonPath: .spec.cdbName + name: CDB NAME + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: 
Status of the LREST Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message if any + jsonPath: .status.msg + name: Message + type: string + - description: string of the tnsalias + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + lrestImage: + type: string + lrestImagePullPolicy: + enum: + - Always + - Never + type: string + lrestImagePullSecret: + type: string + lrestPort: + type: integer + lrestPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: 
+ - key + - secretName + type: object + required: + - secret + type: object + nodeSelector: + additionalProperties: + type: string + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: lrpdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: LRPDB + listKind: LRPDBList + plural: lrpdbs + singular: lrpdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the LRPDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: last sqlcode + jsonPath: 
.status.sqlCode + name: last sqlcode + type: integer + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + - Alter + - Noaction + type: string + adminName: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminpdbPass: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminpdbUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + alterSystem: + type: string + alterSystemParameter: + type: string + alterSystemValue: + type: string + asClone: + type: boolean + assertiveLrpdbDeletion: + type: boolean + cdbName: + type: string + cdbNamespace: + type: string + cdbPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + lrpdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - 
secretName + type: object + required: + - secret + type: object + lrpdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + lrpdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + parameterScope: + type: string + pdbName: + type: string + pdbState: + enum: + - OPEN + - CLOSE + - ALTER + type: string + pdbconfigmap: + type: string + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + - alterSystemParameter + - alterSystemValue + - webServerPwd + type: object + status: + properties: + action: + type: string + alterSystem: + 
type: string + bitstat: + type: integer + bitstatstr: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: string + phase: + type: string + sqlCode: + type: integer + status: + type: boolean + totalSize: + type: string + required: + - phase + - sqlCode + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: oraclerestdataservices.database.oracle.com +spec: + group: database.oracle.com + names: + kind: OracleRestDataService + listKind: OracleRestDataServiceList + plural: oraclerestdataservices + singular: oraclerestdataservice + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .spec.databaseRef + name: Database + type: string + - jsonPath: .status.databaseApiUrl + name: Database API URL + type: string + - jsonPath: .status.databaseActionsUrl + name: Database Actions URL + type: string + - jsonPath: .status.apexUrl + name: Apex URL + type: string + - jsonPath: .status.mongoDbApiAccessUrl + name: MongoDbApi Access URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + mongoDbApi: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + oracleService: + type: string + ordsPassword: + 
properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + ordsUser: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeName: + type: string + type: object + readinessCheckPeriod: + type: integer + replicas: + minimum: 1 + type: integer + restEnableSchemas: + items: + properties: + enable: + type: boolean + pdbName: + type: string + schemaName: + type: string + urlMapping: + type: string + required: + - enable + - schemaName + type: object + type: array + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + required: + - adminPassword + - databaseRef + - ordsPassword + type: object + status: + properties: + apexConfigured: + type: boolean + apexUrl: + type: string + commonUsersCreated: + type: boolean + databaseActionsUrl: + type: string + databaseApiUrl: + type: string + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: string + mongoDbApi: + type: boolean + mongoDbApiAccessUrl: + type: string + ordsInstalled: + type: boolean + replicas: + type: integer + serviceIP: + type: string + status: + type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .spec.databaseRef + name: Database + type: string + - jsonPath: .status.databaseApiUrl + name: Database API URL + type: string + - jsonPath: .status.databaseActionsUrl + name: Database Actions URL + type: string + - jsonPath: .status.apexUrl + name: Apex URL + type: string + - jsonPath: 
.status.mongoDbApiAccessUrl + name: MongoDbApi Access URL + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + loadBalancer: + type: boolean + mongoDbApi: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + oracleService: + type: string + ordsPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + ordsUser: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeName: + type: string + type: object + readinessCheckPeriod: + type: integer + replicas: + minimum: 1 + type: integer + restEnableSchemas: + items: + properties: + enable: + type: boolean + pdbName: + type: string + schemaName: + type: string + urlMapping: + type: string + required: + - enable + - schemaName + type: object + type: array + serviceAccountName: + type: string + serviceAnnotations: + additionalProperties: + type: string + type: object + required: + - adminPassword + - databaseRef + - ordsPassword + type: object + status: + properties: + apexConfigured: + type: boolean + apexUrl: + type: string + commonUsersCreated: + type: boolean + databaseActionsUrl: + type: string + databaseApiUrl: + type: string + databaseRef: + type: string + image: + properties: + pullFrom: + type: string + pullSecrets: type: string - domain: 
+ version: type: string - faultDomains: - items: - type: string - type: array - hostName: + required: + - pullFrom + type: object + loadBalancer: + type: string + mongoDbApi: + type: boolean + mongoDbApiAccessUrl: + type: string + ordsInstalled: + type: boolean + replicas: + type: integer + serviceIP: + type: string + status: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: ordssrvs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: OrdsSrvs + listKind: OrdsSrvsList + plural: ordssrvs + singular: ordssrvs + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.status + name: status + type: string + - jsonPath: .status.workloadType + name: workloadType + type: string + - jsonPath: .status.ordsVersion + name: ordsVersion + type: string + - jsonPath: .status.httpPort + name: httpPort + type: integer + - jsonPath: .status.httpsPort + name: httpsPort + type: integer + - jsonPath: .status.mongoPort + name: MongoPort + type: integer + - jsonPath: .status.restartRequired + name: restartRequired + type: boolean + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .status.ordsInstalled + name: OrdsInstalled + type: boolean + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + encPrivKey: + properties: + passwordKey: + default: password type: string - initialDataStorageSizeInGB: + secretName: + type: string + required: + - secretName + type: object + forceRestart: + type: boolean + globalSettings: + properties: + cache.metadata.enabled: + type: boolean + cache.metadata.graphql.expireAfterAccess: + format: int64 type: integer - kmsKeyId: + cache.metadata.graphql.expireAfterWrite: + format: 
int64 + type: integer + cache.metadata.jwks.enabled: + type: boolean + cache.metadata.jwks.expireAfterAccess: + format: int64 + type: integer + cache.metadata.jwks.expireAfterWrite: + format: int64 + type: integer + cache.metadata.jwks.initialCapacity: + format: int32 + type: integer + cache.metadata.jwks.maximumSize: + format: int32 + type: integer + cache.metadata.timeout: + format: int64 + type: integer + certSecret: + properties: + cert: + type: string + key: + type: string + secretName: + type: string + required: + - cert + - key + - secretName + type: object + database.api.enabled: + type: boolean + database.api.management.services.disabled: + type: boolean + db.invalidPoolTimeout: + format: int64 + type: integer + debug.printDebugToScreen: + type: boolean + enable.mongo.access.log: + default: false + type: boolean + enable.standalone.access.log: + default: false + type: boolean + error.responseFormat: type: string - kmsKeyVersionId: + feature.grahpql.max.nesting.depth: + format: int32 + type: integer + icap.port: + format: int32 + type: integer + icap.secure.port: + format: int32 + type: integer + icap.server: type: string - licenseModel: + log.procedure: + type: boolean + mongo.enabled: + type: boolean + mongo.idle.timeout: + format: int64 + type: integer + mongo.op.timeout: + format: int64 + type: integer + mongo.port: + default: 27017 + format: int32 + type: integer + request.traceHeaderName: type: string - nodeCount: + security.credentials.attempts: + format: int32 type: integer - pdbName: + security.credentials.lock.time: + format: int64 + type: integer + security.disableDefaultExclusionList: + type: boolean + security.exclusionList: type: string - privateIp: + security.externalSessionTrustedOrigins: type: string - shape: + security.forceHTTPS: + type: boolean + security.httpsHeaderCheck: type: string - sshPublicKeys: - items: + security.inclusionList: + type: string + security.maxEntries: + format: int32 + type: integer + security.verifySSL: + type: 
boolean + standalone.context.path: + default: /ords + type: string + standalone.http.port: + default: 8080 + format: int32 + type: integer + standalone.https.host: + type: string + standalone.https.port: + default: 8443 + format: int32 + type: integer + standalone.stop.timeout: + format: int64 + type: integer + type: object + image: + type: string + imagePullPolicy: + default: IfNotPresent + enum: + - IfNotPresent + - Always + - Never + type: string + imagePullSecrets: + type: string + poolSettings: + items: + properties: + apex.security.administrator.roles: + type: string + apex.security.user.roles: + type: string + autoUpgradeAPEX: + default: false + type: boolean + autoUpgradeORDS: + default: false + type: boolean + db.adminUser: + type: string + db.adminUser.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.cdb.adminUser: + type: string + db.cdb.adminUser.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.connectionType: + enum: + - basic + - tns + - customurl + type: string + db.credentialsSource: + enum: + - pool + - request + type: string + db.customURL: + type: string + db.hostname: + type: string + db.poolDestroyTimeout: + format: int64 + type: integer + db.port: + format: int32 + type: integer + db.secret: + properties: + passwordKey: + default: password + type: string + secretName: + type: string + required: + - secretName + type: object + db.servicename: + type: string + db.sid: + type: string + db.tnsAliasName: + type: string + db.username: + default: ORDS_PUBLIC_USER + type: string + db.wallet.zip.service: + type: string + dbWalletSecret: + properties: + secretName: + type: string + walletName: + type: string + required: + - secretName + - walletName + type: object + debug.trackResources: + type: boolean + feature.openservicebroker.exclude: + type: boolean + 
feature.sdw: + type: boolean + http.cookie.filter: + type: string + jdbc.DriverType: + enum: + - thin + - oci8 + type: string + jdbc.InactivityTimeout: + format: int32 + type: integer + jdbc.InitialLimit: + format: int32 + type: integer + jdbc.MaxConnectionReuseCount: + format: int32 + type: integer + jdbc.MaxConnectionReuseTime: + format: int32 + type: integer + jdbc.MaxLimit: + format: int32 + type: integer + jdbc.MaxStatementsLimit: + format: int32 + type: integer + jdbc.MinLimit: + format: int32 + type: integer + jdbc.SecondsToTrustIdleConnection: + format: int32 + type: integer + jdbc.auth.admin.role: + type: string + jdbc.auth.enabled: + type: boolean + jdbc.cleanup.mode: + type: string + jdbc.statementTimeout: + format: int32 + type: integer + misc.defaultPage: + type: string + misc.pagination.maxRows: + format: int32 + type: integer + owa.trace.sql: + type: boolean + plsql.gateway.mode: + enum: + - disabled + - direct + - proxied + type: string + poolName: + type: string + procedure.preProcess: + type: string + procedure.rest.preHook: + type: string + procedurePostProcess: + type: string + restEnabledSql.active: + type: boolean + security.jwks.connection.timeout: + format: int64 + type: integer + security.jwks.read.timeout: + format: int64 + type: integer + security.jwks.refresh.interval: + format: int64 + type: integer + security.jwks.size: + format: int32 + type: integer + security.jwt.allowed.age: + format: int64 + type: integer + security.jwt.allowed.skew: + format: int64 + type: integer + security.jwt.profile.enabled: + type: boolean + security.requestAuthenticationFunction: + type: string + security.requestValidationFunction: + default: ords_util.authorize_plsql_gateway + type: string + security.validationFunctionType: + enum: + - plsql + - javascript type: string - type: array - storageManagement: - type: string - subnetId: - type: string - tags: - additionalProperties: + soda.defaultLimit: type: string - type: object - tdeWalletPasswordSecret: - 
type: string - timeZone: - type: string - required: - - availabilityDomain - - compartmentId - - dbAdminPaswordSecret - - hostName - - shape - - sshPublicKeys - - subnetId - type: object - hardLink: - type: boolean - id: - type: string - ociConfigMap: - type: string - ociSecret: + soda.maxLimit: + type: string + tnsAdminSecret: + properties: + secretName: + type: string + required: + - secretName + type: object + required: + - db.secret + - poolName + type: object + type: array + replicas: + default: 1 + format: int32 + minimum: 1 + type: integer + workloadType: + default: Deployment + enum: + - Deployment + - StatefulSet + - DaemonSet type: string required: - - ociConfigMap + - globalSettings + - image type: object status: - description: DbcsSystemStatus defines the observed state of DbcsSystem properties: - availabilityDomain: - type: string - cpuCoreCount: - type: integer - dataStoragePercentage: - type: integer - dataStorageSizeInGBs: - type: integer - dbEdition: - type: string - dbInfo: + conditions: items: - description: DbcsSystemStatus defines the observed state of DbcsSystem properties: - dbHomeId: + lastTransitionTime: + format: date-time type: string - dbName: + message: + maxLength: 32768 type: string - dbUniqueName: + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string - dbWorkload: + status: + enum: + - "True" + - "False" + - Unknown type: string - id: + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string + required: + - lastTransitionTime + - message + - reason + - status + - type type: object type: array - displayName: + httpPort: + format: int32 + type: integer + httpsPort: + format: int32 + type: integer + mongoPort: + format: int32 + type: integer + ordsInstalled: + type: boolean + ordsVersion: type: string - id: + 
restartRequired: + type: boolean + status: type: string - licenseModel: + workloadType: type: string - network: + required: + - restartRequired + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: pdbs.database.oracle.com +spec: + group: database.oracle.com + names: + kind: PDB + listKind: PDBList + plural: pdbs + singular: pdb + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the PDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + type: string + adminName: properties: - clientSubnet: - type: string - domainName: - type: string - hostName: - type: string - listenerPort: - type: integer - networkSG: - type: string - scanDnsName: - type: string - vcnName: - type: string + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object 
+ required: + - secret type: object - nodeCount: - type: integer - recoStorageSizeInGB: - type: integer - shape: + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + asClone: + type: boolean + assertivePdbDeletion: + type: boolean + assertivePdbDeletion: + description: turn on the assertive approach to delete pdb resource kubectl delete pdb ..... automatically triggers the pluggable database deletion + type: boolean + cdbName: type: string - state: + cdbNamespace: type: string - storageManagement: + cdbNamespace: + description: CDB Namespace type: string - subnetId: + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED type: string - timeZone: + pdbName: type: string - workRequests: - items: - properties: - operationId: - type: string - operationType: - type: string - percentComplete: - type: string - timeAccepted: - type: string - timeFinished: - type: string - timeStarted: - type: string - required: - - operationId - - operationType - type: object - type: array - required: - - state - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null - name: oraclerestdataservices.database.oracle.com -spec: - group: database.oracle.com - names: - kind: OracleRestDataService - listKind: OracleRestDataServiceList - plural: oraclerestdataservices - singular: oraclerestdataservice - scope: 
Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.status - name: Status - type: string - - jsonPath: .spec.databaseRef - name: Database - type: string - - jsonPath: .status.databaseApiUrl - name: Database API URL - type: string - - jsonPath: .status.databaseActionsUrl - name: Database Actions URL - type: string - - jsonPath: .status.apexUrl - name: Apex URL - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: OracleRestDataService is the Schema for the oraclerestdataservices API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: OracleRestDataServiceSpec defines the desired state of OracleRestDataService - properties: - adminPassword: - description: OracleRestDataServicePassword defines the secret containing Password mapped to secretKey + pdbOrdsPrvKey: properties: - keepSecret: - type: boolean - secretKey: - default: oracle_pwd - type: string - secretName: - type: string + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object required: - - secretName + - secret type: object - apexPassword: - description: OracleRestDataServicePassword defines the secret containing Password mapped to secretKey + pdbOrdsPubKey: properties: - keepSecret: - type: boolean - secretKey: - default: oracle_pwd - type: string - secretName: - type: string + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object required: - - secretName + - secret type: object - databaseRef: + pdbState: + enum: + - OPEN + - CLOSE type: string - image: - description: OracleRestDataServiceImage defines the Image source and pullSecrets for POD + pdbTlsCat: properties: - pullFrom: - type: string - pullSecrets: - type: string - version: - type: string + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object required: - - pullFrom + - secret type: object - loadBalancer: - type: boolean - nodeSelector: - additionalProperties: - type: string + pdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret type: object - oracleService: - type: string - ordsPassword: - description: OracleRestDataServicePassword defines the secret containing Password mapped to 
secretKey + pdbTlsKey: properties: - keepSecret: - type: boolean - secretKey: - default: oracle_pwd - type: string - secretName: - type: string + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object required: - - secretName + - secret type: object - ordsUser: + reuseTempFile: + type: boolean + sourceFileNameConversions: type: string - persistence: - description: OracleRestDataServicePersistence defines the storage releated params + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: properties: - accessMode: - enum: - - ReadWriteOnce - - ReadWriteMany - type: string - size: - type: string - storageClass: - type: string - volumeName: - type: string + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret type: object - replicas: - minimum: 1 - type: integer - restEnableSchemas: - items: - description: OracleRestDataServicePDBSchemas defines the PDB Schemas to be ORDS Enabled - properties: - enable: - type: boolean - pdbName: - type: string - schemaName: - type: string - urlMapping: - type: string - required: - - enable - - schemaName - type: object - type: array - serviceAccountName: + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + totalSize: type: string - serviceAnnotations: - additionalProperties: - type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + 
secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret type: object + xmlFileName: + type: string required: - - adminPassword - - databaseRef - - ordsPassword + - action type: object status: - description: OracleRestDataServiceStatus defines the observed state of OracleRestDataService properties: - apexConfigured: - type: boolean - apexUrl: + action: type: string - commonUsersCreated: - type: boolean - databaseActionsUrl: + connString: type: string - databaseApiUrl: + modifyOption: type: string - databaseRef: + msg: type: string - image: - description: OracleRestDataServiceImage defines the Image source and pullSecrets for POD - properties: - pullFrom: - type: string - pullSecrets: - type: string - version: - type: string - required: - - pullFrom - type: object - loadBalancer: + openMode: type: string - ordsInstalled: - type: boolean - replicas: - type: integer - serviceIP: + phase: type: string status: - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "make" to regenerate code after modifying this file' + type: boolean + totalSize: type: string + required: + - phase + - status type: object type: object served: true storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert - controller-gen.kubebuilder.io/version: v0.6.1 - name: pdbs.database.oracle.com + controller-gen.kubebuilder.io/version: v0.16.5 + name: shardingdatabases.database.oracle.com spec: group: database.oracle.com names: - kind: PDB - listKind: PDBList - plural: pdbs - singular: pdb + kind: ShardingDatabase + listKind: ShardingDatabaseList + plural: shardingdatabases + singular: shardingdatabase scope: Namespaced versions: - additionalPrinterColumns: 
- - description: The connect string to be used - jsonPath: .status.connString - name: Connect_String - type: string - - description: Name of the CDB - jsonPath: .spec.cdbName - name: CDB Name - type: string - - description: Name of the PDB - jsonPath: .spec.pdbName - name: PDB Name - type: string - - description: PDB Open Mode - jsonPath: .status.openMode - name: PDB State - type: string - - description: Total Size of the PDB - jsonPath: .status.totalSize - name: PDB Size + - jsonPath: .status.gsm.state + name: Gsm State type: string - - description: Status of the PDB Resource - jsonPath: .status.phase - name: Status + - jsonPath: .status.gsm.services + name: Services type: string - - description: Error message, if any - jsonPath: .status.msg - name: Message + - jsonPath: .status.gsm.shards + name: shards + priority: 1 type: string name: v1alpha1 schema: openAPIV3Schema: - description: PDB is the Schema for the pdbs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: PDBSpec defines the desired state of PDB properties: - action: - description: 'Action to be taken: Create/Clone/Plug/Unplug/Delete/Modify/Status/Map. Map is used to map a Databse PDB to a Kubernetes PDB CR.' 
- enum: - - Create - - Clone - - Plug - - Unplug - - Delete - - Modify - - Status - - Map + InvitedNodeSubnet: type: string - adminName: - description: The administrator username for the new PDB. This property is required when the Action property is Create. - properties: - secret: - description: PDBSecret defines the secretName - properties: - key: - type: string - secretName: + catalog: + items: + properties: + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - adminPwd: - description: The administrator password for the new PDB. This property is required when the Action property is Create. - properties: - secret: - description: PDBSecret defines the secretName - properties: - key: + type: object + pvAnnotations: + additionalProperties: type: string - secretName: + type: object + pvMatchLabels: + additionalProperties: type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - asClone: - description: Indicate if 'AS CLONE' option should be used in the command to plug in a PDB. This property is applicable when the Action property is PLUG but not required. - type: boolean - assertivePdbDeletion: - description: turn on the assertive approach to delete pdb resource kubectl delete pdb ..... 
automatically triggers the pluggable database deletion - type: boolean - cdbName: - description: Name of the CDB - type: string - cdbNamespace: - description: CDB Namespace - type: string - cdbResName: - description: Name of the CDB Custom Resource that runs the ORDS container - type: string - copyAction: - description: To copy files or not while cloning a PDB - enum: - - COPY - - NOCOPY - - MOVE - type: string - dropAction: - description: Specify if datafiles should be removed or not. The value can be INCLUDING or KEEP (default). - enum: - - INCLUDING - - KEEP - type: string - fileNameConversions: - description: Relevant for Create and Plug operations. As defined in the Oracle Multitenant Database documentation. Values can be a filename convert pattern or NONE. - type: string - getScript: - description: Whether you need the script only or execute the script - type: boolean - modifyOption: - description: Extra options for opening and closing a PDB - enum: - - IMMEDIATE - - NORMAL - - READ ONLY - - READ WRITE - - RESTRICTED + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + dbEdition: type: string - pdbName: - description: The name of the 
new PDB. Relevant for both Create and Plug Actions. + dbImage: type: string - pdbState: - description: The target state of the PDB - enum: - - OPEN - - CLOSE + dbImagePullSecret: type: string - pdbTlsCat: + dbSecret: properties: - secret: - description: PDBSecret defines the secretName - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object + encryptionType: + type: string + keyFileMountLocation: + type: string + keyFileName: + type: string + keySecretName: + type: string + name: + type: string + nsConfigMap: + type: string + nsSecret: + type: string + pwdFileMountLocation: + type: string + pwdFileName: + type: string required: - - secret + - name + - pwdFileName type: object - pdbTlsCrt: - properties: - secret: - description: PDBSecret defines the secretName - properties: - key: - type: string - secretName: + fssStorageClass: + type: string + gsm: + items: + properties: + directorName: + type: string + envVars: + description: Replicas int32 `json:"replicas,omitempty"` // Gsm Replicas. If you set OraGsmPvcName then it is set default to 1. 
+ items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - pdbTlsKey: - properties: - secret: - description: PDBSecret defines the secretName - properties: - key: + type: object + pvAnnotations: + additionalProperties: type: string - secretName: + type: object + pvMatchLabels: + additionalProperties: type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - reuseTempFile: - description: Whether to reuse temp file - type: boolean - sourceFileNameConversions: - description: This property is required when the Action property is Plug. As defined in the Oracle Multitenant Database documentation. Values can be a source filename convert pattern or NONE. 
+ type: object + pvcName: + type: string + region: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + gsmDevMode: type: string - sparseClonePath: - description: A Path specified for sparse clone snapshot copy. 
(Optional) + gsmImage: type: string - srcPdbName: - description: Name of the Source PDB from which to clone + gsmImagePullSecret: + type: string + gsmService: + items: + properties: + available: + type: string + clbGoal: + type: string + commitOutcome: + type: string + drainTimeout: + type: string + dtp: + type: string + edition: + type: string + failoverDelay: + type: string + failoverMethod: + type: string + failoverPrimary: + type: string + failoverRestore: + type: string + failoverRetry: + type: string + failoverType: + type: string + gdsPool: + type: string + lag: + type: integer + locality: + type: string + name: + type: string + notification: + type: string + pdbName: + type: string + policy: + type: string + preferred: + type: string + prferredAll: + type: string + regionFailover: + type: string + retention: + type: string + role: + type: string + sessionState: + type: string + sqlTransactionProfile: + type: string + stopOption: + type: string + tableFamily: + type: string + tfaPolicy: + type: string + required: + - name + type: object + type: array + gsmShardGroup: + items: + properties: + deployAs: + type: string + name: + type: string + region: + type: string + required: + - name + type: object + type: array + gsmShardSpace: + items: + properties: + chunks: + type: integer + name: + type: string + protectionMode: + type: string + shardGroup: + type: string + required: + - name + type: object + type: array + invitedNodeSubnetFlag: type: string - tdeExport: - description: TDE export for unplug operations + isClone: type: boolean - tdeImport: - description: TDE import for plug operations + isDataGuard: type: boolean - tdeKeystorePath: - description: TDE keystore path is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations. 
+ isDebug: + type: boolean + isDeleteOraPvc: + type: boolean + isDownloadScripts: + type: boolean + isExternalSvc: + type: boolean + isTdeWallet: type: string - tdePassword: - description: TDE password if the tdeImport or tdeExport flag is set to true. Can be used in create, plug or unplug operations - properties: - secret: - description: PDBSecret defines the secretName - properties: - key: - type: string - secretName: + liveinessCheckPeriod: + type: integer + portMappings: + items: + properties: + port: + format: int32 + type: integer + protocol: + type: string + targetPort: + format: int32 + type: integer + required: + - port + - protocol + - targetPort + type: object + type: array + readinessCheckPeriod: + type: integer + replicationType: + type: string + scriptsLocation: + type: string + shard: + items: + properties: + deployAs: + type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + enum: + - enable + - disable + - failed + - force + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - tdeSecret: - description: TDE secret is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations. - properties: - secret: - description: PDBSecret defines the secretName - properties: - key: + type: object + pvAnnotations: + additionalProperties: type: string - secretName: + type: object + pvMatchLabels: + additionalProperties: type: string - required: - - key - - secretName - type: object - required: - - secret - type: object - tempSize: - description: Relevant for Create and Clone operations. Total size for temporary tablespace as defined in the Oracle Multitenant Database documentation. 
See size_clause description in Database SQL Language Reference documentation. + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + shardBuddyRegion: type: string - totalSize: - description: Relevant for create and plug operations. Total size as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. + shardConfigName: type: string - unlimitedStorage: - description: Relevant for Create and Plug operations. True for unlimited storage. Even when set to true, totalSize and tempSize MUST be specified in the request if Action is Create. 
- type: boolean - webServerPwd: - description: Password for the Web ServerPDB User - properties: - secret: - description: PDBSecret defines the secretName - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - secret + shardRegion: + items: + type: string + type: array + shardingType: + type: string + stagePvcName: + type: string + storageClass: + type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string + topicId: + type: string + required: + - catalog + - dbImage + - gsm + - gsmImage + - shard + type: object + status: + properties: + catalogs: + additionalProperties: + type: string type: object - webServerUser: - description: Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + gsm: properties: - secret: - description: PDBSecret defines the secretName - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName + details: + additionalProperties: + type: string + type: object + externalConnectStr: + type: string + internalConnectStr: + type: string + services: + type: string + shards: + additionalProperties: + type: string type: object - 
required: - - secret + state: + type: string + type: object + shards: + additionalProperties: + type: string type: object - xmlFileName: - description: XML metadata filename to be used for Plug or Unplug operations - type: string - required: - - action - type: object - status: - description: PDBStatus defines the observed state of PDB - properties: - action: - description: Last Completed Action - type: string - connString: - description: PDB Connect String - type: string - modifyOption: - description: Modify Option of the PDB - type: string - msg: - description: Message - type: string - openMode: - description: Open mode of the PDB - type: string - phase: - description: Phase of the PDB Resource - type: string - status: - description: PDB Resource Status - type: boolean - totalSize: - description: Total size of the PDB - type: string - required: - - phase - - status type: object type: object served: true - storage: true + storage: false subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.6.1 - creationTimestamp: null - name: shardingdatabases.database.oracle.com -spec: - group: database.oracle.com - names: - kind: ShardingDatabase - listKind: ShardingDatabaseList - plural: shardingdatabases - singular: shardingdatabase - scope: Namespaced - versions: - additionalPrinterColumns: - jsonPath: .status.gsm.state name: Gsm State @@ -2045,31 +11884,25 @@ spec: name: shards priority: 1 type: string - name: v1alpha1 + name: v4 schema: openAPIV3Schema: - description: ShardingDatabase is the Schema for the shardingdatabases API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: ShardingDatabaseSpec defines the desired state of ShardingDatabase properties: InvitedNodeSubnet: type: string catalog: items: - description: CatalogSpec defines the desired state of CatalogSpec properties: envVars: items: - description: EnvironmentVariable represents a named variable accessible for containers. properties: name: type: string @@ -2081,7 +11914,6 @@ spec: type: object type: array imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a container image type: string isDelete: type: string @@ -2104,15 +11936,13 @@ spec: pvcName: type: string resources: - description: ResourceRequirements describes the compute resource requirements. properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + request: type: string required: - name @@ -2128,7 +11958,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -2137,9 +11966,14 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string storageSizeInGb: format: int32 type: integer @@ -2154,7 +11988,6 @@ spec: dbImagePullSecret: type: string dbSecret: - description: Secret Details properties: encryptionType: type: string @@ -2182,14 +12015,11 @@ spec: type: string gsm: items: - description: GsmSpec defines the desired state of GsmSpec properties: directorName: type: string envVars: - description: Replicas int32 `json:"replicas,omitempty"` // Gsm Replicas. If you set OraGsmPvcName then it is set default to 1. items: - description: EnvironmentVariable represents a named variable accessible for containers. 
properties: name: type: string @@ -2201,7 +12031,6 @@ spec: type: object type: array imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a container image type: string isDelete: type: string @@ -2213,6 +12042,10 @@ spec: additionalProperties: type: string type: object + pvAnnotations: + additionalProperties: + type: string + type: object pvMatchLabels: additionalProperties: type: string @@ -2222,15 +12055,13 @@ spec: region: type: string resources: - description: ResourceRequirements describes the compute resource requirements. properties: claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + request: type: string required: - name @@ -2246,7 +12077,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object requests: additionalProperties: @@ -2255,7 +12085,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object storageSizeInGb: @@ -2273,7 +12102,6 @@ spec: type: string gsmService: items: - description: Service Definition properties: available: type: string @@ -2352,227 +12180,493 @@ spec: type: array gsmShardSpace: items: - description: ShardSpace Specs properties: chunks: type: integer - name: + name: + type: string + protectionMode: + type: string + shardGroup: + type: string + required: + - name + type: object + type: array + invitedNodeSubnetFlag: + type: string + isClone: + type: boolean + isDataGuard: + type: boolean + isDebug: + type: boolean + isDeleteOraPvc: + type: boolean + isDownloadScripts: + type: boolean + isExternalSvc: + type: boolean + isTdeWallet: + type: string + liveinessCheckPeriod: + type: integer + portMappings: + items: + properties: + port: + format: int32 + type: integer + protocol: + type: string + targetPort: + format: int32 + type: integer + required: + - port + - protocol + - targetPort + type: object + type: array + readinessCheckPeriod: + type: integer + replicationType: + type: string + scriptsLocation: + type: string + shard: + items: + properties: + deployAs: + type: string + envVars: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + imagePullPolicy: + type: string + isDelete: + enum: + - enable + - disable + - failed + - force + type: string + label: + type: string + name: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + pvAnnotations: + additionalProperties: + type: string + type: object + pvMatchLabels: + additionalProperties: + type: string + type: object + pvcName: + type: string + resources: + properties: + claims: + items: + 
properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + shardGroup: + type: string + shardRegion: + type: string + shardSpace: + type: string + storageSizeInGb: + format: int32 + type: integer + required: + - name + type: object + type: array + shardBuddyRegion: + type: string + shardConfigName: + type: string + shardRegion: + items: + type: string + type: array + shardingType: + type: string + stagePvcName: + type: string + storageClass: + type: string + tdeWalletPvc: + type: string + tdeWalletPvcMountLocation: + type: string + topicId: + type: string + required: + - catalog + - dbImage + - gsm + - gsmImage + - shard + type: object + status: + properties: + catalogs: + additionalProperties: + type: string + type: object + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string - protectionMode: + status: + enum: + - "True" + - "False" + - Unknown type: string - shardGroup: + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string required: - - name + - lastTransitionTime + - 
message + - reason + - status + - type type: object type: array - invitedNodeSubnetFlag: - type: string - isClone: - type: boolean - isDataGuard: + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + gsm: + properties: + details: + additionalProperties: + type: string + type: object + externalConnectStr: + type: string + internalConnectStr: + type: string + services: + type: string + shards: + additionalProperties: + type: string + type: object + state: + type: string + type: object + shards: + additionalProperties: + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.5 + name: singleinstancedatabases.database.oracle.com +spec: + group: database.oracle.com + names: + kind: SingleInstanceDatabase + listKind: SingleInstanceDatabaseList + plural: singleinstancedatabases + singular: singleinstancedatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.edition + name: Edition + type: string + - jsonPath: .status.sid + name: Sid + priority: 1 + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .status.role + name: Role + type: string + - jsonPath: .status.releaseUpdate + name: Version + type: string + - jsonPath: .status.connectString + name: Connect Str + type: string + - jsonPath: .status.pdbConnectString + name: Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.tcpsConnectString + name: TCPS Connect Str + type: string + - jsonPath: .status.tcpsPdbConnectString + name: TCPS Pdb Connect Str + priority: 1 + type: string + - jsonPath: .status.oemExpressUrl + name: Oem Express Url + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + 
apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + adminPassword: + properties: + keepSecret: + type: boolean + secretKey: + default: oracle_pwd + type: string + secretName: + type: string + required: + - secretName + type: object + archiveLog: type: boolean - isDebug: + charset: + type: string + convertToSnapshotStandby: type: boolean - isDeleteOraPvc: + createAs: + enum: + - primary + - standby + - clone + - truecache + type: string + edition: + enum: + - standard + - enterprise + - express + - free + type: string + enableTCPS: type: boolean - isDownloadScripts: + flashBack: type: boolean - isExternalSvc: + forceLog: type: boolean - isTdeWallet: - type: string - liveinessCheckPeriod: - type: integer - namespace: - type: string - portMappings: - items: - description: PortMapping is a specification of port mapping for an application deployment. - properties: - port: - format: int32 - type: integer - protocol: - default: TCP - type: string - targetPort: - format: int32 - type: integer - required: - - port - - protocol - - targetPort - type: object - type: array - readinessCheckPeriod: + image: + properties: + prebuiltDB: + type: boolean + pullFrom: + type: string + pullSecrets: + type: string + version: + type: string + required: + - pullFrom + type: object + initParams: + properties: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + listenerPort: type: integer - replicationType: - type: string - scriptsLocation: + loadBalancer: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + pdbName: type: string - shard: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file' - items: - description: ShardSpec is a specification of Shards for an application deployment. 
- properties: - deployAs: - type: string - envVars: - items: - description: EnvironmentVariable represents a named variable accessible for containers. - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a container image - type: string - isDelete: - enum: - - enable - - disable - - failed - - force - type: string - label: - type: string - name: - type: string - nodeSelector: - additionalProperties: + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany + type: string + datafilesVolumeName: + type: string + scriptsVolumeName: + type: string + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: + type: string + type: object + primaryDatabaseRef: + type: string + readinessCheckPeriod: + type: integer + replicas: + type: integer + resources: + properties: + limits: + properties: + cpu: type: string - type: object - pvAnnotations: - additionalProperties: + memory: type: string - type: object - pvMatchLabels: - additionalProperties: + type: object + requests: + properties: + cpu: type: string - type: object - pvcName: - type: string - resources: - description: ResourceRequirements describes the compute resource requirements. - properties: - claims: - description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
- type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - shardGroup: - type: string - shardRegion: - type: string - shardSpace: - type: string - storageSizeInGb: - format: int32 - type: integer - required: - - name - type: object - type: array - shardBuddyRegion: + memory: + type: string + type: object + type: object + serviceAccountName: type: string - shardConfigName: + serviceAnnotations: + additionalProperties: + type: string + type: object + sid: + maxLength: 12 + pattern: ^[a-zA-Z0-9]+$ type: string - shardRegion: + tcpsCertRenewInterval: + type: string + tcpsListenerPort: + type: integer + tcpsTlsSecret: + type: string + trueCacheServices: items: type: string type: array - shardingType: + required: + - image + type: object + status: + properties: + apexInstalled: + type: boolean + archiveLog: type: string - stagePvcName: + certCreationTimestamp: type: string - storageClass: + certRenewInterval: type: string - tdeWalletPvc: + charset: type: string - tdeWalletPvcMountLocation: + clientWalletLoc: + type: string + clusterConnectString: type: string - required: - - catalog - - dbImage - - gsm - - gsmImage - - shard - type: object - status: - description: To understand Metav1.Condition, please refer the link https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1 ShardingDatabaseStatus defines the observed state of ShardingDatabase - properties: - catalogs: - additionalProperties: - type: string - type: object conditions: items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. 
\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -2587,58 +12681,114 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map - gsm: + connectString: + type: string + convertToSnapshotStandby: + type: boolean + createdAs: + type: string + datafilesCreated: + default: "false" + type: string + datafilesPatched: + default: "false" + type: string + dgBroker: + type: string + edition: + type: string + flashBack: + type: string + forceLog: + type: string + initParams: properties: - details: - additionalProperties: - type: string - type: object - externalConnectStr: + cpuCount: + type: integer + pgaAggregateTarget: + type: integer + processes: + type: integer + sgaTarget: + type: integer + type: object + initPgaSize: + type: integer + initSgaSize: + type: integer + isTcpsEnabled: + default: false + type: boolean + nodes: + items: + type: string + type: array + oemExpressUrl: + type: string + ordsReference: + type: string + pdbConnectString: + type: string + pdbName: + type: string + persistence: + properties: + accessMode: + enum: + - ReadWriteOnce + - ReadWriteMany type: string - internalConnectStr: + datafilesVolumeName: type: string - services: + scriptsVolumeName: type: string - shards: - additionalProperties: - type: string - type: object - state: + setWritePermissions: + type: boolean + size: + type: string + storageClass: + type: string + volumeClaimAnnotation: type: string type: object - shards: + prebuiltDB: + type: boolean + primaryDatabase: + type: string + releaseUpdate: + type: string + replicas: + type: integer + role: + type: string + sid: + type: string + standbyDatabases: 
additionalProperties: type: string type: object + status: + type: string + tcpsConnectString: + type: string + tcpsPdbConnectString: + type: string + tcpsTlsSecret: + default: "" + type: string + required: + - isTcpsEnabled + - persistence + - tcpsTlsSecret type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert - controller-gen.kubebuilder.io/version: v0.6.1 - name: singleinstancedatabases.database.oracle.com -spec: - group: database.oracle.com - names: - kind: SingleInstanceDatabase - listKind: SingleInstanceDatabaseList - plural: singleinstancedatabases - singular: singleinstancedatabase - scope: Namespaced - versions: + type: object + served: true + storage: false + subresources: + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} - additionalPrinterColumns: - jsonPath: .status.edition name: Edition @@ -2673,24 +12823,19 @@ spec: - jsonPath: .status.oemExpressUrl name: Oem Express Url type: string - name: v1alpha1 + name: v4 schema: openAPIV3Schema: - description: SingleInstanceDatabase is the Schema for the singleinstancedatabases API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: SingleInstanceDatabaseSpec defines the desired state of SingleInstanceDatabase properties: adminPassword: - description: SingleInsatnceAdminPassword defines the secret containing Admin Password mapped to secretKey for Database properties: keepSecret: type: boolean @@ -2706,14 +12851,15 @@ spec: type: boolean charset: type: string + convertToSnapshotStandby: + type: boolean createAs: enum: - primary - standby - clone + - truecache type: string - dgBrokerConfigured: - type: boolean edition: enum: - standard @@ -2728,7 +12874,6 @@ spec: forceLog: type: boolean image: - description: SingleInstanceDatabaseImage defines the Image source and pullSecrets for POD properties: prebuiltDB: type: boolean @@ -2742,7 +12887,6 @@ spec: - pullFrom type: object initParams: - description: SingleInstanceDatabaseInitParams defines the Init Parameters properties: cpuCount: type: integer @@ -2764,7 +12908,6 @@ spec: pdbName: type: string persistence: - description: SingleInstanceDatabasePersistence defines the storage size and class for PVC properties: accessMode: enum: @@ -2814,7 +12957,6 @@ spec: type: string type: object sid: - description: SID must be alphanumeric (no special characters, only a-z, A-Z, 0-9), and no longer than 12 characters. maxLength: 12 pattern: ^[a-zA-Z0-9]+$ type: string @@ -2824,11 +12966,14 @@ spec: type: integer tcpsTlsSecret: type: string + trueCacheServices: + items: + type: string + type: array required: - image type: object status: - description: SingleInstanceDatabaseStatus defines the observed state of SingleInstanceDatabase properties: apexInstalled: type: boolean @@ -2846,36 +12991,29 @@ spec: type: string conditions: items: - description: "Condition contains details for one aspect of the current state of this API Resource. 
--- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n \ttype FooStatus struct{ \t // Represents the observations of a foo's current state. \t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type \t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields \t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. 
enum: - "True" - "False" - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -2892,6 +13030,8 @@ spec: x-kubernetes-list-type: map connectString: type: string + convertToSnapshotStandby: + type: boolean createdAs: type: string datafilesCreated: @@ -2900,8 +13040,8 @@ spec: datafilesPatched: default: "false" type: string - dgBrokerConfigured: - type: boolean + dgBroker: + type: string edition: type: string flashBack: @@ -2909,7 +13049,6 @@ spec: forceLog: type: string initParams: - description: SingleInstanceDatabaseInitParams defines the Init Parameters properties: cpuCount: type: integer @@ -2940,7 +13079,6 @@ spec: pdbName: type: string persistence: - description: SingleInstanceDatabasePersistence defines the storage size and class for PVC properties: accessMode: enum: @@ -2998,12 +13136,6 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role @@ -3021,360 +13153,56 @@ rules: - watch - create - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - update - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: 
null - name: oracle-database-operator-manager-role -rules: -- apiGroups: - - "" - resources: - - configmaps - - deployments - - events - - pods - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - - events - - pods - - pods/exec - - pods/log - - replicasets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - deployments - - events - - pods - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - - pods - - pods/exec - - pods/log - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - persistentvolumes - verbs: - - get - - list - - watch -- apiGroups: - - '''''' - resources: - - statefulsets/finalizers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - configmaps - verbs: - - get - - list -- apiGroups: - - apps - resources: - - deployments - - pods - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - replicasets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - get - - 
list - - update -- apiGroups: - - "" - resources: - - configmaps - - containers - - events - - namespaces - - persistentvolumeclaims - - pods - - pods/exec - - pods/log - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - - containers - - events - - namespaces - - pods - - pods/exec - - pods/log - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - pods - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - pods/exec - verbs: - - create + - patch + - delete - apiGroups: - - database.oracle.com + - coordination.k8s.io resources: - - autonomouscontainerdatabases + - leases verbs: - - create - - delete - get - list - - patch - - update - watch -- apiGroups: - - database.oracle.com - resources: - - autonomouscontainerdatabases/status - verbs: - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: - - autonomousdatabasebackups - verbs: - create - - delete - - get - - list - update - - watch -- apiGroups: - - database.oracle.com - resources: - - autonomousdatabasebackups/status - verbs: - - get - patch - - update -- apiGroups: - - database.oracle.com - resources: - - autonomousdatabaserestores - verbs: - - create - delete - - get - - list - - update - - watch - apiGroups: - - database.oracle.com + - "" resources: - - autonomousdatabaserestores/status + - configmaps/status verbs: - get - - patch - update -- apiGroups: - - database.oracle.com - resources: - - autonomousdatabases - verbs: - - create - - delete - - get - - list - patch - - update - - watch - apiGroups: - - database.oracle.com + - "" resources: - - autonomousdatabases/status + - events verbs: + - create - patch - - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRole +metadata: + name: oracle-database-operator-manager-role +rules: - apiGroups: - - database.oracle.com + - "" resources: - - cdbs + - configmaps + - containers + - deployments + - events + - namespaces + - persistentvolumeclaims + - pods + - pods/exec + - pods/log + - replicasets + - secrets + - services verbs: - create - delete @@ -3384,49 +13212,35 @@ rules: - update - watch - apiGroups: - - database.oracle.com - resources: - - cdbs/finalizers - verbs: - - update -- apiGroups: - - database.oracle.com + - "" resources: - - cdbs/status + - configmaps/status + - daemonsets/status + - deployments/status + - services/status + - statefulsets/status verbs: - get - patch - update - apiGroups: - - database.oracle.com + - "" resources: - - dataguardbrokers + - persistentvolumes verbs: - - create - - delete - get - list - - patch - - update - watch - apiGroups: - - database.oracle.com - resources: - - dataguardbrokers/finalizers - verbs: - - update -- apiGroups: - - database.oracle.com + - "" resources: - - dataguardbrokers/status + - secrets/status verbs: - get - - patch - - update - apiGroups: - - database.oracle.com + - '''''' resources: - - dbcssystems + - statefulsets/finalizers verbs: - create - delete @@ -3436,27 +13250,21 @@ rules: - update - watch - apiGroups: - - database.oracle.com - resources: - - dbcssystems/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - database.oracle.com + - apps resources: - - dbcssystems/status + - configmaps verbs: - get - - patch - - update + - list - apiGroups: - - database.oracle.com + - apps resources: - - oraclerestdataservices + - daemonsets + - deployments + - pods + - replicasets + - services + - statefulsets verbs: - create - delete @@ -3466,23 +13274,30 @@ rules: - update - watch - apiGroups: - - database.oracle.com - resources: - - oraclerestdataservices/finalizers - verbs: - - update -- apiGroups: - - database.oracle.com + - coordination.k8s.io resources: - - 
oraclerestdataservices/status + - leases verbs: + - create - get - - patch + - list - update - apiGroups: - database.oracle.com resources: + - autonomouscontainerdatabases + - autonomousdatabases + - cdbs + - dataguardbrokers + - dbcssystems + - events + - lrests + - lrpdbs + - oraclerestdataservices + - ordssrvs - pdbs + - shardingdatabases + - singleinstancedatabases verbs: - create - delete @@ -3494,17 +13309,19 @@ rules: - apiGroups: - database.oracle.com resources: - - pdbs/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - database.oracle.com - resources: + - autonomouscontainerdatabases/status + - autonomousdatabasebackups/status + - autonomousdatabaserestores/status + - cdbs/status + - dataguardbrokers/status + - dbcssystems/status + - lrests/status + - lrpdbs/status + - oraclerestdataservices/status + - ordssrvs/status - pdbs/status + - shardingdatabases/status + - singleinstancedatabases/status verbs: - get - patch @@ -3512,57 +13329,44 @@ rules: - apiGroups: - database.oracle.com resources: - - shardingdatabases + - autonomousdatabasebackups + - autonomousdatabaserestores verbs: - create - delete - get - list - - patch - update - watch - apiGroups: - database.oracle.com resources: - - shardingdatabases/finalizers + - autonomousdatabases/status verbs: - - create - - delete - - get - patch - update - apiGroups: - database.oracle.com resources: - - shardingdatabases/status + - cdbs/finalizers + - dataguardbrokers/finalizers + - lrests/finalizers + - oraclerestdataservices/finalizers + - ordssrvs/finalizers + - singleinstancedatabases/finalizers verbs: - - get - - patch - update - apiGroups: - database.oracle.com resources: - - singleinstancedatabases + - dbcssystems/finalizers + - lrpdbs/finalizers + - pdbs/finalizers + - shardingdatabases/finalizers verbs: - create - delete - get - - list - - patch - - update - - watch -- apiGroups: - - database.oracle.com - resources: - - singleinstancedatabases/finalizers - verbs: - 
- update -- apiGroups: - - database.oracle.com - resources: - - singleinstancedatabases/status - verbs: - - get - patch - update - apiGroups: @@ -3744,19 +13548,40 @@ webhooks: service: name: oracle-database-operator-webhook-service namespace: oracle-database-operator-system - path: /mutate-database-oracle-com-v1alpha1-autonomousdatabase + path: /mutate-database-oracle-com-v4-autonomousdatabasebackup + failurePolicy: Fail + name: mautonomousdatabasebackupv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-cdb failurePolicy: Fail - name: mautonomousdatabase.kb.io + name: mcdb.kb.io rules: - apiGroups: - database.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE resources: - - autonomousdatabases + - cdbs sideEffects: None - admissionReviewVersions: - v1 @@ -3764,19 +13589,61 @@ webhooks: service: name: oracle-database-operator-webhook-service namespace: oracle-database-operator-system - path: /mutate-database-oracle-com-v1alpha1-autonomousdatabasebackup + path: /mutate-database-oracle-com-v4-dbcssystem failurePolicy: Fail - name: mautonomousdatabasebackup.kb.io + name: mdbcssystemv4.kb.io rules: - apiGroups: - database.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE resources: - - autonomousdatabasebackups + - dbcssystems + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-lrest + failurePolicy: Fail + name: mlrest.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - 
UPDATE + resources: + - lrests + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-lrpdb + failurePolicy: Fail + name: mlrpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrpdbs sideEffects: None - admissionReviewVersions: - v1 @@ -3785,9 +13652,49 @@ webhooks: service: name: oracle-database-operator-webhook-service namespace: oracle-database-operator-system - path: /mutate-database-oracle-com-v1alpha1-cdb + path: /mutate-database-oracle-com-v4-pdb failurePolicy: Fail - name: mcdb.kb.io + name: mpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-shardingdatabase + failurePolicy: Fail + name: mshardingdatabasev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v1alpha1-autonomousdatabasebackup + failurePolicy: Fail + name: mautonomousdatabasebackupv1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -3797,7 +13704,7 @@ webhooks: - CREATE - UPDATE resources: - - cdbs + - autonomousdatabasebackups sideEffects: None - admissionReviewVersions: - v1 @@ -3820,6 +13727,26 @@ webhooks: resources: - dataguardbrokers sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: 
oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /mutate-database-oracle-com-v4-dbcssystem + failurePolicy: Fail + name: mdbcssystemv1alpha1.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - dbcssystems + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -3843,14 +13770,13 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: oracle-database-operator-webhook-service namespace: oracle-database-operator-system - path: /mutate-database-oracle-com-v1alpha1-pdb + path: /mutate-database-oracle-com-v1alpha1-shardingdatabase failurePolicy: Fail - name: mpdb.kb.io + name: mshardingdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -3860,7 +13786,7 @@ webhooks: - CREATE - UPDATE resources: - - pdbs + - shardingdatabases sideEffects: None - admissionReviewVersions: - v1 @@ -3909,14 +13835,14 @@ webhooks: service: name: oracle-database-operator-webhook-service namespace: oracle-database-operator-system - path: /mutate-observability-oracle-com-v1alpha1-databaseobserver + path: /mutate-observability-oracle-com-v4-databaseobserver failurePolicy: Fail name: mdatabaseobserver.kb.io rules: - apiGroups: - observability.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE @@ -3937,14 +13863,14 @@ webhooks: service: name: oracle-database-operator-webhook-service namespace: oracle-database-operator-system - path: /validate-database-oracle-com-v1alpha1-autonomouscontainerdatabase + path: /validate-database-oracle-com-v4-autonomouscontainerdatabase failurePolicy: Fail - name: vautonomouscontainerdatabase.kb.io + name: vautonomouscontainerdatabasev4.kb.io rules: - apiGroups: - database.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE @@ -3957,9 +13883,154 @@ webhooks: service: name: oracle-database-operator-webhook-service namespace: 
oracle-database-operator-system - path: /validate-database-oracle-com-v1alpha1-autonomousdatabase + path: /validate-database-oracle-com-v4-autonomousdatabasebackup + failurePolicy: Fail + name: vautonomousdatabasebackupv4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabasebackups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-autonomousdatabaserestore + failurePolicy: Fail + name: vautonomousdatabaserestorev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - autonomousdatabaserestores + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: vcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-lrest + failurePolicy: Fail + name: vlrest.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - lrests + sideEffects: None +- admissionReviewVersions: + - v4 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-lrpdb + failurePolicy: Fail + name: vlrpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + 
operations: + - CREATE + - UPDATE + resources: + - lrpdbs + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-pdb + failurePolicy: Fail + name: vpdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - pdbs + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v4-shardingdatabase + failurePolicy: Fail + name: vshardingdatabasev4.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - shardingdatabases + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: oracle-database-operator-webhook-service + namespace: oracle-database-operator-system + path: /validate-database-oracle-com-v1alpha1-autonomouscontainerdatabase failurePolicy: Fail - name: vautonomousdatabase.kb.io + name: vautonomouscontainerdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -3969,7 +14040,7 @@ webhooks: - CREATE - UPDATE resources: - - autonomousdatabases + - autonomouscontainerdatabases sideEffects: None - admissionReviewVersions: - v1 @@ -3979,7 +14050,7 @@ webhooks: namespace: oracle-database-operator-system path: /validate-database-oracle-com-v1alpha1-autonomousdatabasebackup failurePolicy: Fail - name: vautonomousdatabasebackup.kb.io + name: vautonomousdatabasebackupv1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -3999,7 +14070,7 @@ webhooks: namespace: oracle-database-operator-system path: /validate-database-oracle-com-v1alpha1-autonomousdatabaserestore failurePolicy: Fail - name: vautonomousdatabaserestore.kb.io + name: 
vautonomousdatabaserestorev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -4013,14 +14084,13 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: oracle-database-operator-webhook-service namespace: oracle-database-operator-system - path: /validate-database-oracle-com-v1alpha1-cdb + path: /validate-database-oracle-com-v1alpha1-autonomousdatabase failurePolicy: Fail - name: vcdb.kb.io + name: vautonomousdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -4030,7 +14100,7 @@ webhooks: - CREATE - UPDATE resources: - - cdbs + - autonomousdatabases sideEffects: None - admissionReviewVersions: - v1 @@ -4076,14 +14146,13 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: oracle-database-operator-webhook-service namespace: oracle-database-operator-system - path: /validate-database-oracle-com-v1alpha1-pdb + path: /validate-database-oracle-com-v1alpha1-shardingdatabase failurePolicy: Fail - name: vpdb.kb.io + name: vshardingdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -4092,8 +14161,9 @@ webhooks: operations: - CREATE - UPDATE + - DELETE resources: - - pdbs + - shardingdatabases sideEffects: None - admissionReviewVersions: - v1 @@ -4144,14 +14214,14 @@ webhooks: service: name: oracle-database-operator-webhook-service namespace: oracle-database-operator-system - path: /validate-observability-oracle-com-v1alpha1-databaseobserver + path: /validate-observability-oracle-com-v4-databaseobserver failurePolicy: Fail name: vdatabaseobserver.kb.io rules: - apiGroups: - observability.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE @@ -4184,7 +14254,7 @@ spec: env: - name: WATCH_NAMESPACE value: "" - image: container-registry.oracle.com/database/operator:latest + image: container-registry.oracle.com/database/operator:1.2.0 imagePullPolicy: Always name: manager ports: diff --git a/ords/Dockerfile 
b/ords/Dockerfile index 772a7e6d..25ba08ec 100644 --- a/ords/Dockerfile +++ b/ords/Dockerfile @@ -40,7 +40,9 @@ FROM container-registry.oracle.com/java/jdk:latest # ------------------------------------------------------------- ENV ORDS_HOME=/opt/oracle/ords/ \ RUN_FILE="runOrdsSSL.sh" \ - ORDSVERSION=23.4.0-8 + ORDSVERSION=23.4.0-8 \ + JAVA=17 +#see https://www.oracle.com/tools/ords/ords-relnotes-23.4.0.html # Copy binaries # ------------- @@ -48,7 +50,7 @@ COPY $RUN_FILE $ORDS_HOME RUN yum -y install yum-utils bind-utils tree hostname openssl net-tools zip unzip tar wget vim-minimal which sudo expect procps curl lsof && \ yum-config-manager --add-repo=http://yum.oracle.com/repo/OracleLinux/OL8/oracle/software/x86_64 && \ - yum -y install java-11-openjdk-devel && \ + yum -y install java-$JAVA-openjdk-devel && \ yum -y install iproute && \ yum clean all @@ -64,14 +66,18 @@ RUN mkdir -p $ORDS_HOME/doc_root && \ chmod ug+x $ORDS_HOME/*.sh && \ groupadd -g 54322 dba && \ usermod -u 54321 -d /home/oracle -g dba -m -s /bin/bash oracle && \ - chown -R oracle:dba $ORDS_HOME && \ - echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + chown -R oracle:dba $ORDS_HOME +# echo "oracle ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers + +RUN echo "unset R1" >> /home/oracle/.bashrc && \ + chown root:root /home/oracle/.bashrc && chmod +r /home/oracle/.bashrc # Finalize setup # ------------------- USER oracle WORKDIR /home/oracle + VOLUME ["$ORDS_HOME/config/ords"] EXPOSE 8888 diff --git a/ords/ords_init.sh b/ords/ords_init.sh new file mode 100644 index 00000000..0994dceb --- /dev/null +++ b/ords/ords_init.sh @@ -0,0 +1,484 @@ +#!/bin/bash +## Copyright (c) 2006, 2024, Oracle and/or its affiliates. 
+## +## The Universal Permissive License (UPL), Version 1.0 +## +## Subject to the condition set forth below, permission is hereby granted to any +## person obtaining a copy of this software, associated documentation and/or data +## (collectively the "Software"), free of charge and under any and all copyright +## rights in the Software, and any and all patent rights owned or freely +## licensable by each licensor hereunder covering either (i) the unmodified +## Software as contributed to or provided by such licensor, or (ii) the Larger +## Works (as defined below), to deal in both +## +## (a) the Software, and +## (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +## one is included with the Software (each a "Larger Work" to which the Software +## is contributed by such licensors), +## +## without restriction, including without limitation the rights to copy, create +## derivative works of, display, perform, and distribute the Software and make, +## use, sell, offer for sale, import, export, have made, and have sold the +## Software and the Larger Work(s), and to sublicense the foregoing rights on +## either these or other terms. +## +## This license is subject to the following condition: +## The above copyright notice and either this complete permission notice or at +## a minimum a reference to the UPL must be included in all copies or +## substantial portions of the Software. +## +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +## SOFTWARE. 
+ +dump_stack(){ +_log_date=`date "+%y:%m:%d %H:%M:%S"` + local frame=0 + local line_no + local function_name + local file_name + echo -e "BACKTRACE [${_log_date}]\n" + echo -e "filename:line\tfunction " + echo -e "------------- --------" + while caller $frame ;do ((frame++)) ;done | \ + while read line_no function_name file_name;\ + do echo -e "$file_name:$line_no\t$function_name" ;done >&2 +} + + + +get_conn_string() { + local -n _conn_string="${1}" + + local -r _admin_user=$($ords_cfg_cmd get --secret db.adminUser | tail -1) + local _conn_type=$($ords_cfg_cmd get db.connectionType |tail -1) + if [[ $_conn_type == "customurl" ]]; then + local -r _conn=$($ords_cfg_cmd get db.customURL | tail -1) + elif [[ $_conn_type == "tns" ]]; then + local -r _tns_service=$($ords_cfg_cmd get db.tnsAliasName | tail -1) + local -r _conn=${_tns_service} + elif [[ $_conn_type == "basic" ]]; then + local -r _host=$($ords_cfg_cmd get db.hostname | tail -1) + local -r _port=$($ords_cfg_cmd get db.port | tail -1) + local -r _service=$($ords_cfg_cmd get db.servicename | tail -1) + local -r _sid=$($ords_cfg_cmd get db.sid | tail -1) + + if [[ -n ${_host} ]] && [[ -n ${_port} ]]; then + if [[ -n ${_service} ]] || [[ -n ${_sid} ]]; then + local -r _conn=${_host}:${_port}/${_service:-$_sid} + fi + fi + else + # wallet + _conn_type="wallet" + local -r _wallet_service=$($ords_cfg_cmd get db.wallet.zip.service | tail -1) + local -r _conn=${_wallet_service} + fi + + if [[ -n ${_conn} ]]; then + echo "Connection String (${_conn_type}): ${_conn}" + _conn_string="${_admin_user%%/ *}/${config["dbadminusersecret"]}@${_conn}" + if [[ ${_admin_user%%/ *} == "SYS" ]]; then + _conn_string="${_conn_string=} AS SYSDBA" + fi + fi +} + +#------------------------------------------------------------------------------ +function run_sql { + local -r _conn_string="${1}" + local -r _sql="${2}" + local -n _output="${3}" + local -i _rc=0 + + if [[ -z ${_sql} ]]; then + dump_stack + echo "FATAL: Dear Developer.. 
you've got a bug calling run_sql" && exit 1 + fi + ## Get TNS_ADMIN location + local -r _tns_admin=$($ords_cfg_cmd get db.tnsDirectory | tail -1) + if [[ ! $_tns_admin =~ "Cannot get setting" ]]; then + echo "Setting: TNS_ADMIN=${_tns_admin}" + export TNS_ADMIN=${_tns_admin} + fi + + ## Get ADB Wallet + local -r _wallet_zip_path=$($ords_cfg_cmd get db.wallet.zip.path | tail -1) + if [[ ! $_wallet_zip_path =~ "Cannot get setting" ]]; then + echo "Using: set cloudconfig ${_wallet_zip_path}" + local -r _cloudconfig="set cloudconfig ${_wallet_zip_path}" + fi + + # NOTE to maintainer; the heredoc must be TAB indented + echo "Running SQL..." + #_output=$(cd ${APEX_HOME}/${APEX_VER} && sql -S /nolog <<-EOSQL + _output=$(cd ${APEX_HOME}/${APEX_VER} && sql -S -nohistory -noupdates /nolog <<-EOSQL + WHENEVER SQLERROR EXIT 1 + WHENEVER OSERROR EXIT 1 + ${_cloudconfig} + connect $_conn_string + set serveroutput on echo off pause off feedback off + set heading off wrap off linesize 1000 pagesize 0 + SET TERMOUT OFF VERIFY OFF + ${_sql} + exit; + EOSQL + ) + _rc=$? + + if (( ${_rc} > 0 )); then + dump_stack + echo "SQLERROR: ${_output}" + fi + + return $_rc +} + +#------------------------------------------------------------------------------ +function check_adb() { + local -r _conn_string=$1 + local -n _is_adb=$2 + + local -r _adb_chk_sql=" + DECLARE + invalid_column exception; + pragma exception_init (invalid_column,-00904); + adb_check integer; + BEGIN + EXECUTE IMMEDIATE q'[SELECT COUNT(*) FROM ( + SELECT JSON_VALUE(cloud_identity, '\$.DATABASE_OCID') AS database_ocid + FROM v\$pdbs) t + WHERE t.database_ocid like '%AUTONOMOUS%']' INTO adb_check; + DBMS_OUTPUT.PUT_LINE(adb_check); + EXCEPTION WHEN invalid_column THEN + DBMS_OUTPUT.PUT_LINE('0'); + END; + /" + echo "Checking if Database is an ADB" + run_sql "${_conn_string}" "${_adb_chk_sql}" "_adb_check" + _rc=$? 
+ + if (( ${_rc} == 0 )); then + _adb_check=${_adb_check//[[:space:]]/} + echo "ADB Check: ${_adb_check}" + if (( ${_adb_check} == 1 )); then + _is_adb=${_adb_check//[[:space:]]/} + fi + fi + + return ${_rc} +} + +function create_adb_user() { + local -r _conn_string="${1}" + local -r _pool_name="${2}" + + local _config_user=$($ords_cfg_cmd get db.username | tail -1) + + if [[ -z ${_config_user} ]] || [[ ${_config_user} == "ORDS_PUBLIC_USER" ]]; then + echo "FATAL: You must specify a db.username <> ORDS_PUBLIC_USER in pool ${_pool_name}" + dump_stack + return 1 + fi + + local -r _adb_user_sql=" + DECLARE + l_user VARCHAR2(255); + l_cdn VARCHAR2(255); + BEGIN + BEGIN + SELECT USERNAME INTO l_user FROM DBA_USERS WHERE USERNAME='${_config_user}'; + EXECUTE IMMEDIATE 'ALTER USER \"${_config_user}\" PROFILE ORA_APP_PROFILE'; + EXECUTE IMMEDIATE 'ALTER USER \"${_config_user}\" IDENTIFIED BY \"${config["dbsecret"]}\"'; + DBMS_OUTPUT.PUT_LINE('${_config_user} Exists - Password reset'); + EXCEPTION + WHEN NO_DATA_FOUND THEN + EXECUTE IMMEDIATE 'CREATE USER \"${_config_user}\" IDENTIFIED BY \"${config["dbsecret"]}\" PROFILE ORA_APP_PROFILE'; + DBMS_OUTPUT.PUT_LINE('${_config_user} Created'); + END; + EXECUTE IMMEDIATE 'GRANT CONNECT TO \"${_config_user}\"'; + BEGIN + SELECT USERNAME INTO l_user FROM DBA_USERS WHERE USERNAME='ORDS_PLSQL_GATEWAY_OPER'; + EXECUTE IMMEDIATE 'ALTER USER \"ORDS_PLSQL_GATEWAY_OPER\" PROFILE DEFAULT'; + EXECUTE IMMEDIATE 'ALTER USER \"ORDS_PLSQL_GATEWAY_OPER\" NO AUTHENTICATION'; + DBMS_OUTPUT.PUT_LINE('ORDS_PLSQL_GATEWAY_OPER Exists'); + EXCEPTION + WHEN NO_DATA_FOUND THEN + EXECUTE IMMEDIATE 'CREATE USER \"ORDS_PLSQL_GATEWAY_OPER\" NO AUTHENTICATION PROFILE DEFAULT'; + DBMS_OUTPUT.PUT_LINE('ORDS_PLSQL_GATEWAY_OPER Created'); + END; + EXECUTE IMMEDIATE 'GRANT CONNECT TO \"ORDS_PLSQL_GATEWAY_OPER\"'; + EXECUTE IMMEDIATE 'ALTER USER \"ORDS_PLSQL_GATEWAY_OPER\" GRANT CONNECT THROUGH \"${_config_user}\"'; + ORDS_ADMIN.PROVISION_RUNTIME_ROLE ( + p_user 
=> '${_config_user}' + ,p_proxy_enabled_schemas => TRUE + ); + ORDS_ADMIN.CONFIG_PLSQL_GATEWAY ( + p_runtime_user => '${_config_user}' + ,p_plsql_gateway_user => 'ORDS_PLSQL_GATEWAY_OPER' + ); + -- TODO: Only do this if ADB APEX Version <> this ORDS Version + BEGIN + SELECT images_version INTO L_CDN + FROM APEX_PATCHES + where is_bundle_patch = 'Yes' + order by patch_version desc + fetch first 1 rows only; + EXCEPTION WHEN NO_DATA_FOUND THEN + select version_no INTO L_CDN + from APEX_RELEASE; + END; + apex_instance_admin.set_parameter( + p_parameter => 'IMAGE_PREFIX', + p_value => 'https://static.oracle.com/cdn/apex/'||L_CDN||'/' + ); + END; + /" + + run_sql "${_conn_string}" "${_adb_user_sql}" "_adb_user_sql_output" + _rc=$? + + echo "Installation Output: ${_adb_user_sql_output}" + return ${_rc} +} + +#------------------------------------------------------------------------------ +function compare_versions() { + local _db_ver=$1 + local _im_ver=$2 + + IFS='.' read -r -a _db_ver_array <<< "$_db_ver" + IFS='.' read -r -a _im_ver_array <<< "$_im_ver" + + # Compare each component + local i + for i in "${!_db_ver_array[@]}"; do + if [[ "${_db_ver_array[$i]}" -lt "${_im_ver_array[$i]}" ]]; then + # _db_ver < _im_ver (upgrade) + return 0 + elif [[ "${_db_ver_array[$i]}" -gt "${_im_ver_array[$i]}" ]]; then + # _db_ver < _im_ver (do nothing) + return 1 + fi + done + # _db_ver == __im_ver (do nothing) + return 1 +} + +#------------------------------------------------------------------------------ +set_secret() { + local -r _pool_name="${1}" + local -r _config_key="${2}" + local -r _config_val="${3}" + local -i _rc=0 + + if [[ -n "${_config_val}" ]]; then + ords --config "$ORDS_CONFIG" config --db-pool "${_pool_name}" secret --password-stdin "${_config_key}" <<< "${_config_val}" + _rc=$? 
+ echo "${_config_key} in pool ${_pool_name} set" + else + echo "${_config_key} in pool ${_pool_name}, not defined" + _rc=0 + fi + + return ${_rc} +} + +#------------------------------------------------------------------------------ +ords_upgrade() { + local -r _pool_name="${1}" + local -r _upgrade_key="${2}" + local -i _rc=0 + + if [[ -n "${config["dbadminusersecret"]}" ]]; then + # Get usernames + local -r ords_user=$($ords_cfg_cmd get db.username | tail -1) + local -r ords_admin=$($ords_cfg_cmd get db.adminUser | tail -1) + + echo "Performing ORDS install/upgrade as $ords_admin into $ords_user on pool ${_pool_name}" + if [[ ${_pool_name} == "default" ]]; then + ords --config "$ORDS_CONFIG" install --db-only \ + --admin-user "$ords_admin" --password-stdin <<< "${config["dbadminusersecret"]}" + _rc=$? + else + ords --config "$ORDS_CONFIG" install --db-pool "${_pool_name}" --db-only \ + --admin-user "$ords_admin" --password-stdin <<< "${config["dbadminusersecret"]}" + _rc=$? + fi + + # Dar be bugs below deck with --db-user so using the above + # ords --config "$ORDS_CONFIG" install --db-pool "$1" --db-only \ + # --admin-user "$ords_admin" --db-user "$ords_user" --password-stdin <<< "${!2}" + fi + + return $_rc +} + +#------------------------------------------------------------------------------ +function get_apex_version() { + local -r _conn_string="${1}" + local -n _action="${2}" + local -i _rc=0 + + local -r _ver_sql="SELECT VERSION FROM DBA_REGISTRY WHERE COMP_ID='APEX';" + run_sql "${_conn_string}" "${_ver_sql}" "_db_apex_version" + _rc=$? 
+ + if (( $_rc > 0 )); then + echo "FATAL: Unable to connect to ${_conn_string} to get APEX version" + dump_stack + return $_rc + fi + + local -r _db_apex_version=${_db_apex_version//[^0-9.]/} + echo "Database APEX Version: ${_db_apex_version:-Not Installed}" + + _action="none" + if [[ -z "${_db_apex_version}" ]]; then + echo "Installing APEX ${APEX_VER}" + _action="install" + elif compare_versions ${_db_apex_version} ${APEX_VER}; then + echo "Upgrading from ${_db_apex_version} to ${APEX_VER}" + _action="upgrade" + else + echo "No Installation/Upgrade Required" + fi + + return $_rc +} + +apex_upgrade() { + local -r _conn_string="${1}" + local -r _upgrade_key="${2}" + local -i _rc=0 + + if [[ -f ${APEX_HOME}/${APEX_VER}/apexins.sql ]] && [[ "${!_upgrade_key}" = "true" ]]; then + echo "Starting Installation of APEX ${APEX_VER}" + local -r _install_sql="@apxsilentins.sql SYSAUX SYSAUX TEMP /i/ ${config["dbsecret"]} ${config["dbsecret"]} ${config["dbsecret"]} ${config["dbsecret"]}" + run_sql "${_conn_string}" "${_install_sql}" "_install_output" + _rc=$? + echo "Installation Output: ${_install_output}" + fi + + return $_rc +} + +#------------------------------------------------------------------------------ +# INIT +#------------------------------------------------------------------------------ +declare -A pool_exit +for pool in "$ORDS_CONFIG"/databases/*; do + rc=0 + pool_name=$(basename "$pool") + pool_exit[${pool_name}]=0 + ords_cfg_cmd="ords --config $ORDS_CONFIG config --db-pool ${pool_name}" + echo "Found Pool: $pool_name..." 
+ + declare -A config + for key in dbsecret dbadminusersecret dbcdbadminusersecret; do + var_key="${pool_name//-/_}_${key}" + echo "Obtaining value from initContainer variable: ${var_key}" + var_val="${!var_key}" + config[${key}]="${var_val}" + done + + # Set Secrets + set_secret "${pool_name}" "db.password" "${config["dbsecret"]}" + rc=$((rc + $?)) + set_secret "${pool_name}" "db.adminUser.password" "${config["dbadminusersecret"]}" + rc=$((rc + $?)) + set_secret "${pool_name}" "db.cdb.adminUser.password" "${config["dbcdbadminusersecret"]}" + rc=$((rc + $?)) + + if (( ${rc} > 0 )); then + echo "FATAL: Unable to set configuration for pool ${pool_name}" + dump_stack + pool_exit[${pool_name}]=1 + continue + elif [[ -z ${config["dbsecret"]} ]]; then + echo "FATAL: db.password must be specified for ${pool_name}" + dump_stack + pool_exit[${pool_name}]=1 + continue + elif [[ -z ${config["dbadminusersecret"]} ]]; then + echo "INFO: No additional configuration for ${pool_name}" + continue + fi + + get_conn_string "conn_string" + if [[ -z ${conn_string} ]]; then + echo "FATAL: Unable to get ${pool_name} database connect string" + dump_stack + pool_exit[${pool_name}]=1 + continue + fi + + check_adb "${conn_string}" "is_adb" + rc=$? 
+ if (( ${rc} > 0 )); then + pool_exit[${pool_name}]=1 + continue + fi + + if (( is_adb )); then + # Create ORDS User + echo "Processing ADB in Pool: ${pool_name}" + create_adb_user "${conn_string}" "${pool_name}" + else + # APEX Upgrade + echo "---------------------------------------------------" + apex_upgrade_var=${pool_name}_autoupgrade_apex + if [[ ${!apex_upgrade_var} != "true" ]]; then + echo "APEX Install/Upgrade not requested for ${pool_name}" + continue + fi + + get_apex_version "${conn_string}" "action" + if [[ -z ${action} ]]; then + echo "FATAL: Unable to get ${pool_name} APEX Version" + dump_stack + pool_exit[${pool_name}]=1 + continue + fi + + if [[ ${action} != "none" ]]; then + apex_upgrade "${conn_string}" "${pool_name}_autoupgrade_apex" + if (( $? > 0 )); then + echo "FATAL: Unable to ${action} APEX for ${pool_name}" + dump_stack + pool_exit[${pool_name}]=1 + continue + fi + fi + + # ORDS Upgrade + ords_upgrade_var=${pool_name}_autoupgrade_ords + if [[ ${!ords_upgrade_var} != "true" ]]; then + echo "ORDS Install/Upgrade not requested for ${pool_name}" + continue + fi + + ords_upgrade "${pool_name}" "${pool_name}_autoupgrade_ords" + rc=$? 
+ if (( $rc > 0 )); then + echo "FATAL: Unable to preform requested ORDS install/upgrade on ${pool_name}" + pool_exit[${pool_name}]=1 + dump_stack + continue + fi + fi +done + +for key in "${!pool_exit[@]}"; do + echo "Pool: $key, Exit Code: ${pool_exit[$key]}" + if (( ${pool_exit[$key]} > 0 )); then + rc=1 + fi +done + +exit $rc +#exit 0 diff --git a/ords/runOrdsSSL.sh b/ords/runOrdsSSL.sh index 35f1b77b..07e2b931 100644 --- a/ords/runOrdsSSL.sh +++ b/ords/runOrdsSSL.sh @@ -106,36 +106,42 @@ function setupOrds() { echo "====================================================" echo CONFIG=$CONFIG +echo $R1|sed 's/-----BEGIN PRIVATE KEY-----/-----BEGIN PRIVATE KEY-----\n/g'|\ + sed 's/-----END PRIVATE KEY-----/\n-----END PRIVATE KEY-----/' > $ORDS_HOME/k.txt + + export ORDS_LOGS=/tmp [ -f $ORDS_HOME/secrets/$WEBSERVER_USER_KEY ] && { - WEBSERVER_USER=`cat $ORDS_HOME/secrets/$WEBSERVER_USER_KEY` + WEBSERVER_USER=$(cat /opt/oracle/ords/secrets/${WEBSERVER_USER_KEY}|base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) } [ -f $ORDS_HOME/secrets/$WEBSERVER_PASSWORD_KEY ] && { - WEBSERVER_PASSWORD=`cat $ORDS_HOME/secrets/$WEBSERVER_PASSWORD_KEY` + WEBSERVER_PASSWORD=$(cat /opt/oracle/ords/secrets/${WEBSERVER_PASSWORD_KEY}|base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) } [ -f $ORDS_HOME/secrets/$CDBADMIN_USER_KEY ] && { - CDBADMIN_USER=`cat $ORDS_HOME/secrets/$CDBADMIN_USER_KEY` + CDBADMIN_USER=$(cat /opt/oracle/ords/secrets/${CDBADMIN_USER_KEY} | base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) } [ -f $ORDS_HOME/secrets/$CDBADMIN_PWD_KEY ] && { - CDBADMIN_PWD=`cat $ORDS_HOME/secrets/$CDBADMIN_PWD_KEY` + CDBADMIN_PWD=$(cat /opt/oracle/ords/secrets/${CDBADMIN_PWD_KEY} | base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) } [ -f $ORDS_HOME/secrets/$ORACLE_PWD_KEY ] && { - 
SYSDBA_PASSWORD=`cat $ORDS_HOME/secrets/$ORACLE_PWD_KEY` + #SYSDBA_PASSWORD=`cat $ORDS_HOME/secrets/$ORACLE_PWD_KEY` + SYSDBA_PASSWORD=$(cat $ORDS_HOME/secrets/${ORACLE_PWD_KEY} | base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) } [ -f $ORDS_HOME/secrets/$ORACLE_PWD_KEY ] && { - ORDS_PASSWORD=`cat $ORDS_HOME/secrets/$ORDS_PWD_KEY` + #ORDS_PASSWORD=`cat $ORDS_HOME/secrets/$ORDS_PWD_KEY` + ORDS_PASSWORD=$(cat $ORDS_HOME/secrets/${ORDS_PWD_KEY} | base64 --decode |openssl rsautl -decrypt -out swap -inkey $ORDS_HOME/k.txt -in - ; cat swap ;rm swap) } @@ -151,6 +157,7 @@ ${SYSDBA_PASSWORD:-PROVIDE_A_PASSWORD} ${ORDS_PASSWORD:-PROVIDE_A_PASSWORD} EOF +rm $ORDS_HOME/k.txt if [ $? -ne 0 ] diff --git a/test/e2e/autonomouscontainerdatabase_test.go b/test/e2e/autonomouscontainerdatabase_test.go index f27d8a6d..a76fc33f 100644 --- a/test/e2e/autonomouscontainerdatabase_test.go +++ b/test/e2e/autonomouscontainerdatabase_test.go @@ -49,8 +49,8 @@ import ( "k8s.io/apimachinery/pkg/types" dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/test/e2e/behavior" - "github.com/oracle/oracle-database-operator/test/e2e/util" + e2ebehavior "github.com/oracle/oracle-database-operator/test/e2e/behavior" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" // +kubebuilder:scaffold:imports ) @@ -81,7 +81,7 @@ var _ = Describe("test ACD binding", func() { CompartmentOCID: common.String(SharedCompartmentOCID), AutonomousExadataVMClusterOCID: common.String(SharedExadataVMClusterOCID), PatchModel: database.AutonomousContainerDatabasePatchModelUpdates, - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OCIConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -119,7 +119,7 @@ var _ = Describe("test ACD binding", func() { }, Spec: dbv1alpha1.AutonomousContainerDatabaseSpec{ 
AutonomousContainerDatabaseOCID: common.String(acdID), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OCIConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, diff --git a/test/e2e/autonomousdatabase_controller_bind_test.go b/test/e2e/autonomousdatabase_controller_bind_test.go index 58de3356..48e60f0d 100644 --- a/test/e2e/autonomousdatabase_controller_bind_test.go +++ b/test/e2e/autonomousdatabase_controller_bind_test.go @@ -50,8 +50,8 @@ import ( "k8s.io/apimachinery/pkg/types" dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/test/e2e/behavior" - "github.com/oracle/oracle-database-operator/test/e2e/util" + e2ebehavior "github.com/oracle/oracle-database-operator/test/e2e/behavior" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" // +kubebuilder:scaffold:imports ) @@ -103,18 +103,18 @@ var _ = Describe("test ADB binding with hardLink=true", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - AutonomousDatabaseOCID: adbID, - Wallet: dbv1alpha1.WalletSpec{ - Name: common.String(downloadedWallet), - Password: dbv1alpha1.PasswordSpec{ - K8sSecret: dbv1alpha1.K8sSecretSpec{ - Name: common.String(SharedWalletPassSecretName), - }, + Id: adbID, + }, + Wallet: dbv1alpha1.WalletSpec{ + Name: common.String(downloadedWallet), + Password: dbv1alpha1.PasswordSpec{ + K8sSecret: dbv1alpha1.K8sSecretSpec{ + Name: common.String(SharedWalletPassSecretName), }, }, }, HardLink: common.Bool(false), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -136,9 +136,9 @@ var _ = Describe("test ADB binding with hardLink=true", func() { It("Should restart ADB", e2ebehavior.UpdateAndAssertADBState(&k8sClient, &dbClient, &adbLookupKey, 
database.AutonomousDatabaseLifecycleStateAvailable)) - It("Should change to RESTRICTED network access", e2ebehavior.TestNetworkAccessRestricted(&k8sClient, &dbClient, &adbLookupKey, false)) + It("Should change to RESTRICTED network access", e2ebehavior.TestNetworkAccessRestricted(&k8sClient, &dbClient, &adbLookupKey, []string{"192.168.0.1"}, false)) - It("Should change isMTLSConnectionRequired to false", e2ebehavior.TestNetworkAccessRestricted(&k8sClient, &dbClient, &adbLookupKey, false)) + It("Should change isMTLSConnectionRequired to false", e2ebehavior.TestNetworkAccessRestricted(&k8sClient, &dbClient, &adbLookupKey, []string{"192.168.0.1"}, false)) It("Should should change to PRIVATE network access", e2ebehavior.TestNetworkAccessPrivate(&k8sClient, &dbClient, &adbLookupKey, false, &SharedSubnetOCID, &SharedNsgOCID)) @@ -162,18 +162,18 @@ var _ = Describe("test ADB binding with hardLink=true", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - AutonomousDatabaseOCID: adbID, - Wallet: dbv1alpha1.WalletSpec{ - Name: common.String(downloadedWallet), - Password: dbv1alpha1.PasswordSpec{ - OCISecret: dbv1alpha1.OCISecretSpec{ - OCID: common.String(SharedInstanceWalletPasswordOCID), - }, + Id: adbID, + }, + Wallet: dbv1alpha1.WalletSpec{ + Name: common.String(downloadedWallet), + Password: dbv1alpha1.PasswordSpec{ + OciSecret: dbv1alpha1.OciSecretSpec{ + Id: common.String(SharedInstanceWalletPasswordOCID), }, }, }, HardLink: common.Bool(true), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -210,10 +210,10 @@ var _ = Describe("test ADB binding with hardLink=true", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - AutonomousDatabaseOCID: &terminatedAdbID, + Id: &terminatedAdbID, }, HardLink: common.Bool(true), - OCIConfig: 
dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, diff --git a/test/e2e/autonomousdatabase_controller_create_test.go b/test/e2e/autonomousdatabase_controller_create_test.go index 9cf0e7e7..cc7fd288 100644 --- a/test/e2e/autonomousdatabase_controller_create_test.go +++ b/test/e2e/autonomousdatabase_controller_create_test.go @@ -48,8 +48,8 @@ import ( "k8s.io/apimachinery/pkg/types" dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/test/e2e/behavior" - "github.com/oracle/oracle-database-operator/test/e2e/util" + e2ebehavior "github.com/oracle/oracle-database-operator/test/e2e/behavior" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" // +kubebuilder:scaffold:imports ) @@ -89,28 +89,30 @@ var _ = Describe("test ADB provisioning", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - CompartmentOCID: common.String(SharedCompartmentOCID), - DbName: common.String(dbName), - DisplayName: common.String(dbName), - CPUCoreCount: common.Int(1), - AdminPassword: dbv1alpha1.PasswordSpec{ - K8sSecret: dbv1alpha1.K8sSecretSpec{ - Name: common.String(SharedAdminPassSecretName), - }, - }, - DataStorageSizeInTBs: common.Int(1), - IsAutoScalingEnabled: common.Bool(true), - Wallet: dbv1alpha1.WalletSpec{ - Name: common.String(downloadedWallet), - Password: dbv1alpha1.PasswordSpec{ + AutonomousDatabaseBase: dbv1alpha1.AutonomousDatabaseBase{ + CompartmentId: common.String(SharedCompartmentOCID), + DbName: common.String(dbName), + DisplayName: common.String(dbName), + CpuCoreCount: common.Int(1), + AdminPassword: dbv1alpha1.PasswordSpec{ K8sSecret: dbv1alpha1.K8sSecretSpec{ - Name: common.String(SharedWalletPassSecretName), + Name: common.String(SharedAdminPassSecretName), }, }, + DataStorageSizeInTBs: common.Int(1), + 
IsAutoScalingEnabled: common.Bool(true), + }, + }, + Wallet: dbv1alpha1.WalletSpec{ + Name: common.String(downloadedWallet), + Password: dbv1alpha1.PasswordSpec{ + K8sSecret: dbv1alpha1.K8sSecretSpec{ + Name: common.String(SharedWalletPassSecretName), + }, }, }, HardLink: common.Bool(true), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -134,20 +136,22 @@ var _ = Describe("test ADB provisioning", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - CompartmentOCID: common.String(SharedCompartmentOCID), - DbName: common.String(dbName), - DisplayName: common.String(dbName), - CPUCoreCount: common.Int(1), - AdminPassword: dbv1alpha1.PasswordSpec{ - K8sSecret: dbv1alpha1.K8sSecretSpec{ - Name: common.String(SharedAdminPassSecretName), + AutonomousDatabaseBase: dbv1alpha1.AutonomousDatabaseBase{ + CompartmentId: common.String(SharedCompartmentOCID), + DbName: common.String(dbName), + DisplayName: common.String(dbName), + CpuCoreCount: common.Int(1), + AdminPassword: dbv1alpha1.PasswordSpec{ + K8sSecret: dbv1alpha1.K8sSecretSpec{ + Name: common.String(SharedAdminPassSecretName), + }, }, + DataStorageSizeInTBs: common.Int(1), + IsAutoScalingEnabled: common.Bool(true), }, - DataStorageSizeInTBs: common.Int(1), - IsAutoScalingEnabled: common.Bool(true), }, HardLink: common.Bool(true), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -179,7 +183,7 @@ var _ = Describe("test ADB provisioning", func() { // Get adb ocid adb := &dbv1alpha1.AutonomousDatabase{} Expect(k8sClient.Get(context.TODO(), adbLookupKey, adb)).To(Succeed()) - databaseOCID := adb.Spec.Details.AutonomousDatabaseOCID + databaseOCID := adb.Spec.Details.Id tnsEntry := dbName + "_high" err := 
e2ebehavior.ConfigureADBBackup(&dbClient, databaseOCID, &tnsEntry, &SharedPlainTextAdminPassword, &SharedPlainTextWalletPassword, &SharedBucketUrl, &SharedAuthToken, &SharedOciUser) Expect(err).ShouldNot(HaveOccurred()) @@ -195,12 +199,12 @@ var _ = Describe("test ADB provisioning", func() { }, Spec: dbv1alpha1.AutonomousDatabaseBackupSpec{ Target: dbv1alpha1.TargetSpec{ - OCIADB: dbv1alpha1.OCIADBSpec{ - OCID: common.String(*databaseOCID), + OciAdb: dbv1alpha1.OciAdbSpec{ + Ocid: common.String(*databaseOCID), }, }, DisplayName: common.String(backupName), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OCIConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -227,16 +231,16 @@ var _ = Describe("test ADB provisioning", func() { }, Spec: dbv1alpha1.AutonomousDatabaseRestoreSpec{ Target: dbv1alpha1.TargetSpec{ - K8sADB: dbv1alpha1.K8sADBSpec{ + K8sAdb: dbv1alpha1.K8sAdbSpec{ Name: common.String(resourceName), }, }, Source: dbv1alpha1.SourceSpec{ - K8sADBBackup: dbv1alpha1.K8sADBBackupSpec{ + K8sAdbBackup: dbv1alpha1.K8sAdbBackupSpec{ Name: common.String(backupName), }, }, - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OCIConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, @@ -273,29 +277,30 @@ var _ = Describe("test ADB provisioning", func() { }, Spec: dbv1alpha1.AutonomousDatabaseSpec{ Details: dbv1alpha1.AutonomousDatabaseDetails{ - CompartmentOCID: common.String(SharedCompartmentOCID), - DbName: common.String(dbName), - DisplayName: common.String(dbName), - CPUCoreCount: common.Int(1), - AdminPassword: dbv1alpha1.PasswordSpec{ - OCISecret: dbv1alpha1.OCISecretSpec{ - OCID: common.String(SharedAdminPasswordOCID), + AutonomousDatabaseBase: dbv1alpha1.AutonomousDatabaseBase{ + CompartmentId: common.String(SharedCompartmentOCID), + DbName: common.String(dbName), + DisplayName: common.String(dbName), + 
CpuCoreCount: common.Int(1), + AdminPassword: dbv1alpha1.PasswordSpec{ + OciSecret: dbv1alpha1.OciSecretSpec{ + Id: common.String(SharedAdminPasswordOCID), + }, }, + DataStorageSizeInTBs: common.Int(1), + IsAutoScalingEnabled: common.Bool(true), }, - DataStorageSizeInTBs: common.Int(1), - IsAutoScalingEnabled: common.Bool(true), - - Wallet: dbv1alpha1.WalletSpec{ - Name: common.String(downloadedWallet), - Password: dbv1alpha1.PasswordSpec{ - OCISecret: dbv1alpha1.OCISecretSpec{ - OCID: common.String(SharedInstanceWalletPasswordOCID), - }, + }, + Wallet: dbv1alpha1.WalletSpec{ + Name: common.String(downloadedWallet), + Password: dbv1alpha1.PasswordSpec{ + OciSecret: dbv1alpha1.OciSecretSpec{ + Id: common.String(SharedInstanceWalletPasswordOCID), }, }, }, HardLink: common.Bool(true), - OCIConfig: dbv1alpha1.OCIConfigSpec{ + OciConfig: dbv1alpha1.OciConfigSpec{ ConfigMapName: common.String(SharedOCIConfigMapName), SecretName: common.String(SharedOCISecretName), }, diff --git a/test/e2e/behavior/shared_behaviors.go b/test/e2e/behavior/shared_behaviors.go index 00fc02c9..3d87ce94 100644 --- a/test/e2e/behavior/shared_behaviors.go +++ b/test/e2e/behavior/shared_behaviors.go @@ -55,11 +55,12 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" - "github.com/oracle/oracle-database-operator/test/e2e/util" "os" "os/exec" "strings" + + dbv1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" ) /************************************************************** @@ -110,11 +111,11 @@ func AssertProvision(k8sClient *client.Client, adbLookupKey *types.NamespacedNam return nil, err } - return createdADB.Spec.Details.AutonomousDatabaseOCID, nil + return createdADB.Spec.Details.Id, nil }, provisionTimeout, intervalTime).ShouldNot(BeNil()) fmt.Fprintf(GinkgoWriter, "AutonomousDatabase 
DbName = %s, and AutonomousDatabaseOCID = %s\n", - *createdADB.Spec.Details.DbName, *createdADB.Spec.Details.AutonomousDatabaseOCID) + *createdADB.Spec.Details.DbName, *createdADB.Spec.Details.Id) } } @@ -138,13 +139,13 @@ func AssertBind(k8sClient *client.Client, adbLookupKey *types.NamespacedName) fu if err != nil { return false } - return (boundADB.Spec.Details.CompartmentOCID != nil && + return (boundADB.Spec.Details.CompartmentId != nil && boundADB.Spec.Details.DbWorkload != "" && boundADB.Spec.Details.DbName != nil) }, bindTimeout).Should(Equal(true), "Attributes in the resource should not be empty") fmt.Fprintf(GinkgoWriter, "AutonomousDatabase DbName = %s, and AutonomousDatabaseOCID = %s\n", - *boundADB.Spec.Details.DbName, *boundADB.Spec.Details.AutonomousDatabaseOCID) + *boundADB.Spec.Details.DbName, *boundADB.Spec.Details.Id) } } @@ -163,10 +164,10 @@ func AssertWallet(k8sClient *client.Client, adbLookupKey *types.NamespacedName) Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) // The default name is xxx-instance-wallet - if adb.Spec.Details.Wallet.Name == nil { + if adb.Spec.Wallet.Name == nil { walletName = adb.Name + "-instance-wallet" } else { - walletName = *adb.Spec.Details.Wallet.Name + walletName = *adb.Spec.Wallet.Name } By("Checking the wallet secret " + walletName + " is created and is not empty") @@ -249,7 +250,7 @@ func UpdateDetails(k8sClient *client.Client, dbClient *database.DatabaseClient, // , the List request returns PROVISIONING state. In this case the update request will fail with // conflict state error. 
Eventually(func() (database.AutonomousDatabaseLifecycleStateEnum, error) { - listResp, err := e2eutil.ListAutonomousDatabases(derefDBClient, expectedADB.Spec.Details.CompartmentOCID, expectedADB.Spec.Details.DisplayName) + listResp, err := e2eutil.ListAutonomousDatabases(derefDBClient, expectedADB.Spec.Details.CompartmentId, expectedADB.Spec.Details.DisplayName) if err != nil { return "", err } @@ -265,7 +266,7 @@ func UpdateDetails(k8sClient *client.Client, dbClient *database.DatabaseClient, var newDisplayName = *expectedADB.Spec.Details.DisplayName + "_new" var newCPUCoreCount int - if *expectedADB.Spec.Details.CPUCoreCount == 1 { + if *expectedADB.Spec.Details.CpuCoreCount == 1 { newCPUCoreCount = 2 } else { newCPUCoreCount = 1 @@ -278,7 +279,7 @@ func UpdateDetails(k8sClient *client.Client, dbClient *database.DatabaseClient, newDisplayName, newCPUCoreCount, newKey, newVal)) expectedADB.Spec.Details.DisplayName = common.String(newDisplayName) - expectedADB.Spec.Details.CPUCoreCount = common.Int(newCPUCoreCount) + expectedADB.Spec.Details.CpuCoreCount = common.Int(newCPUCoreCount) expectedADB.Spec.Details.FreeformTags = map[string]string{newKey: newVal} expectedADB.Spec.Details.AdminPassword.K8sSecret.Name = common.String(newSecretName) @@ -307,18 +308,18 @@ func AssertADBDetails(k8sClient *client.Client, Eventually(func() (bool, error) { // Fetch the ADB from OCI when it's in AVAILABLE state, and retry if its attributes doesn't match the new ADB's attributes retryPolicy := e2eutil.NewLifecycleStateRetryPolicyADB(database.AutonomousDatabaseLifecycleStateAvailable) - resp, err := e2eutil.GetAutonomousDatabase(derefDBClient, expectedADB.Spec.Details.AutonomousDatabaseOCID, &retryPolicy) + resp, err := e2eutil.GetAutonomousDatabase(derefDBClient, expectedADB.Spec.Details.Id, &retryPolicy) if err != nil { return false, err } debug := false if debug { - if !compareString(expectedADBDetails.AutonomousDatabaseOCID, resp.AutonomousDatabase.Id) { - 
fmt.Fprintf(GinkgoWriter, "Expected OCID: %v\nGot: %v\n", expectedADBDetails.AutonomousDatabaseOCID, resp.AutonomousDatabase.Id) + if !compareString(expectedADBDetails.Id, resp.AutonomousDatabase.Id) { + fmt.Fprintf(GinkgoWriter, "Expected OCID: %v\nGot: %v\n", expectedADBDetails.Id, resp.AutonomousDatabase.Id) } - if !compareString(expectedADBDetails.CompartmentOCID, resp.AutonomousDatabase.CompartmentId) { - fmt.Fprintf(GinkgoWriter, "Expected CompartmentOCID: %v\nGot: %v\n", expectedADBDetails.CompartmentOCID, resp.CompartmentId) + if !compareString(expectedADBDetails.CompartmentId, resp.AutonomousDatabase.CompartmentId) { + fmt.Fprintf(GinkgoWriter, "Expected CompartmentOCID: %v\nGot: %v\n", expectedADBDetails.CompartmentId, resp.CompartmentId) } if !compareString(expectedADBDetails.DisplayName, resp.AutonomousDatabase.DisplayName) { fmt.Fprintf(GinkgoWriter, "Expected DisplayName: %v\nGot: %v\n", expectedADBDetails.DisplayName, resp.AutonomousDatabase.DisplayName) @@ -338,8 +339,8 @@ func AssertADBDetails(k8sClient *client.Client, if !compareInt(expectedADBDetails.DataStorageSizeInTBs, resp.AutonomousDatabase.DataStorageSizeInTBs) { fmt.Fprintf(GinkgoWriter, "Expected DataStorageSize: %v\nGot: %v\n", expectedADBDetails.DataStorageSizeInTBs, resp.AutonomousDatabase.DataStorageSizeInTBs) } - if !compareInt(expectedADBDetails.CPUCoreCount, resp.AutonomousDatabase.CpuCoreCount) { - fmt.Fprintf(GinkgoWriter, "Expected CPUCoreCount: %v\nGot: %v\n", expectedADBDetails.CPUCoreCount, resp.AutonomousDatabase.CpuCoreCount) + if !compareInt(expectedADBDetails.CpuCoreCount, resp.AutonomousDatabase.CpuCoreCount) { + fmt.Fprintf(GinkgoWriter, "Expected CPUCoreCount: %v\nGot: %v\n", expectedADBDetails.CpuCoreCount, resp.AutonomousDatabase.CpuCoreCount) } if !compareBool(expectedADBDetails.IsAutoScalingEnabled, resp.AutonomousDatabase.IsAutoScalingEnabled) { fmt.Fprintf(GinkgoWriter, "Expected IsAutoScalingEnabled: %v\nGot: %v\n", expectedADBDetails.IsAutoScalingEnabled, 
resp.AutonomousDatabase.IsAutoScalingEnabled) @@ -347,23 +348,23 @@ func AssertADBDetails(k8sClient *client.Client, if !compareStringMap(expectedADBDetails.FreeformTags, resp.AutonomousDatabase.FreeformTags) { fmt.Fprintf(GinkgoWriter, "Expected FreeformTags: %v\nGot: %v\n", expectedADBDetails.FreeformTags, resp.AutonomousDatabase.FreeformTags) } - if !compareBool(expectedADBDetails.NetworkAccess.IsAccessControlEnabled, resp.AutonomousDatabase.IsAccessControlEnabled) { - fmt.Fprintf(GinkgoWriter, "Expected IsAccessControlEnabled: %v\nGot: %v\n", expectedADBDetails.NetworkAccess.IsAccessControlEnabled, resp.AutonomousDatabase.IsAccessControlEnabled) + if !compareBool(expectedADBDetails.IsAccessControlEnabled, resp.AutonomousDatabase.IsAccessControlEnabled) { + fmt.Fprintf(GinkgoWriter, "Expected IsAccessControlEnabled: %v\nGot: %v\n", expectedADBDetails.IsAccessControlEnabled, resp.AutonomousDatabase.IsAccessControlEnabled) } - if !reflect.DeepEqual(expectedADBDetails.NetworkAccess.AccessControlList, resp.AutonomousDatabase.WhitelistedIps) { - fmt.Fprintf(GinkgoWriter, "Expected AccessControlList: %v\nGot: %v\n", expectedADBDetails.NetworkAccess.AccessControlList, resp.AutonomousDatabase.WhitelistedIps) + if !reflect.DeepEqual(expectedADBDetails.WhitelistedIps, resp.AutonomousDatabase.WhitelistedIps) { + fmt.Fprintf(GinkgoWriter, "Expected AccessControlList: %v\nGot: %v\n", expectedADBDetails.WhitelistedIps, resp.AutonomousDatabase.WhitelistedIps) } - if !compareBool(expectedADBDetails.NetworkAccess.IsMTLSConnectionRequired, resp.AutonomousDatabase.IsMtlsConnectionRequired) { - fmt.Fprintf(GinkgoWriter, "Expected IsMTLSConnectionRequired: %v\nGot: %v\n", expectedADBDetails.NetworkAccess.IsMTLSConnectionRequired, resp.AutonomousDatabase.IsMtlsConnectionRequired) + if !compareBool(expectedADBDetails.IsMtlsConnectionRequired, resp.AutonomousDatabase.IsMtlsConnectionRequired) { + fmt.Fprintf(GinkgoWriter, "Expected IsMTLSConnectionRequired: %v\nGot: %v\n", 
expectedADBDetails.IsMtlsConnectionRequired, resp.AutonomousDatabase.IsMtlsConnectionRequired) } - if !compareString(expectedADBDetails.NetworkAccess.PrivateEndpoint.SubnetOCID, resp.AutonomousDatabase.SubnetId) { - fmt.Fprintf(GinkgoWriter, "Expected SubnetOCID: %v\nGot: %v\n", expectedADBDetails.NetworkAccess.PrivateEndpoint.SubnetOCID, resp.AutonomousDatabase.SubnetId) + if !compareString(expectedADBDetails.SubnetId, resp.AutonomousDatabase.SubnetId) { + fmt.Fprintf(GinkgoWriter, "Expected SubnetOCID: %v\nGot: %v\n", expectedADBDetails.SubnetId, resp.AutonomousDatabase.SubnetId) } - if !reflect.DeepEqual(expectedADBDetails.NetworkAccess.PrivateEndpoint.NsgOCIDs, resp.AutonomousDatabase.NsgIds) { - fmt.Fprintf(GinkgoWriter, "Expected NsgOCIDs: %v\nGot: %v\n", expectedADBDetails.NetworkAccess.PrivateEndpoint.NsgOCIDs, resp.AutonomousDatabase.NsgIds) + if !reflect.DeepEqual(expectedADBDetails.NsgIds, resp.AutonomousDatabase.NsgIds) { + fmt.Fprintf(GinkgoWriter, "Expected NsgOCIDs: %v\nGot: %v\n", expectedADBDetails.NsgIds, resp.AutonomousDatabase.NsgIds) } - if !compareString(expectedADBDetails.NetworkAccess.PrivateEndpoint.HostnamePrefix, resp.AutonomousDatabase.PrivateEndpointLabel) { - fmt.Fprintf(GinkgoWriter, "Expected HostnamePrefix: %v\nGot: %v\n", expectedADBDetails.NetworkAccess.PrivateEndpoint.HostnamePrefix, resp.AutonomousDatabase.PrivateEndpointLabel) + if !compareString(expectedADBDetails.PrivateEndpointLabel, resp.AutonomousDatabase.PrivateEndpointLabel) { + fmt.Fprintf(GinkgoWriter, "Expected PrivateEndpointLabel: %v\nGot: %v\n", expectedADBDetails.PrivateEndpointLabel, resp.AutonomousDatabase.PrivateEndpointLabel) } } @@ -371,23 +372,23 @@ func AssertADBDetails(k8sClient *client.Client, // (e.g. adminPassword, wallet) are missing from e2eutil.GetAutonomousDatabase(). // We don't compare LifecycleState in this case. We only make sure that the ADB is in AVAIABLE state before // proceeding to the next test. 
- same := compareString(expectedADBDetails.AutonomousDatabaseOCID, resp.AutonomousDatabase.Id) && - compareString(expectedADBDetails.CompartmentOCID, resp.AutonomousDatabase.CompartmentId) && + same := compareString(expectedADBDetails.Id, resp.AutonomousDatabase.Id) && + compareString(expectedADBDetails.CompartmentId, resp.AutonomousDatabase.CompartmentId) && compareString(expectedADBDetails.DisplayName, resp.AutonomousDatabase.DisplayName) && compareString(expectedADBDetails.DbName, resp.AutonomousDatabase.DbName) && expectedADBDetails.DbWorkload == resp.AutonomousDatabase.DbWorkload && compareBool(expectedADBDetails.IsDedicated, resp.AutonomousDatabase.IsDedicated) && compareString(expectedADBDetails.DbVersion, resp.AutonomousDatabase.DbVersion) && compareInt(expectedADBDetails.DataStorageSizeInTBs, resp.AutonomousDatabase.DataStorageSizeInTBs) && - compareInt(expectedADBDetails.CPUCoreCount, resp.AutonomousDatabase.CpuCoreCount) && + compareInt(expectedADBDetails.CpuCoreCount, resp.AutonomousDatabase.CpuCoreCount) && compareBool(expectedADBDetails.IsAutoScalingEnabled, resp.AutonomousDatabase.IsAutoScalingEnabled) && compareStringMap(expectedADBDetails.FreeformTags, resp.AutonomousDatabase.FreeformTags) && - compareBool(expectedADBDetails.NetworkAccess.IsAccessControlEnabled, resp.AutonomousDatabase.IsAccessControlEnabled) && - reflect.DeepEqual(expectedADBDetails.NetworkAccess.AccessControlList, resp.AutonomousDatabase.WhitelistedIps) && - compareBool(expectedADBDetails.NetworkAccess.IsMTLSConnectionRequired, resp.AutonomousDatabase.IsMtlsConnectionRequired) && - compareString(expectedADBDetails.NetworkAccess.PrivateEndpoint.SubnetOCID, resp.AutonomousDatabase.SubnetId) && - reflect.DeepEqual(expectedADBDetails.NetworkAccess.PrivateEndpoint.NsgOCIDs, resp.AutonomousDatabase.NsgIds) && - compareString(expectedADBDetails.NetworkAccess.PrivateEndpoint.HostnamePrefix, resp.AutonomousDatabase.PrivateEndpointLabel) + 
compareBool(expectedADBDetails.IsAccessControlEnabled, resp.AutonomousDatabase.IsAccessControlEnabled) && + reflect.DeepEqual(expectedADBDetails.WhitelistedIps, resp.AutonomousDatabase.WhitelistedIps) && + compareBool(expectedADBDetails.IsMtlsConnectionRequired, resp.AutonomousDatabase.IsMtlsConnectionRequired) && + compareString(expectedADBDetails.SubnetId, resp.AutonomousDatabase.SubnetId) && + reflect.DeepEqual(expectedADBDetails.NsgIds, resp.AutonomousDatabase.NsgIds) && + compareString(expectedADBDetails.PrivateEndpointLabel, resp.AutonomousDatabase.PrivateEndpointLabel) return same, nil }, updateADBTimeout, intervalTime).Should(BeTrue()) @@ -398,15 +399,9 @@ func AssertADBDetails(k8sClient *client.Client, } } -func TestNetworkAccessRestricted(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, isMTLSConnectionRequired bool) func() { +func TestNetworkAccessRestricted(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, acl []string, isMTLSConnectionRequired bool) func() { return func() { - networkRestrictedSpec := dbv1alpha1.NetworkAccessSpec{ - AccessType: dbv1alpha1.NetworkAccessTypeRestricted, - IsMTLSConnectionRequired: common.Bool(isMTLSConnectionRequired), - AccessControlList: []string{"192.168.0.1"}, - } - - TestNetworkAccess(k8sClient, dbClient, adbLookupKey, networkRestrictedSpec)() + TestNetworkAccess(k8sClient, dbClient, adbLookupKey, nil, nil, acl, isMTLSConnectionRequired)() } } @@ -450,33 +445,17 @@ func TestNetworkAccessPrivate(k8sClient *client.Client, dbClient *database.Datab derefK8sClient := *k8sClient Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).Should(Succeed()) - networkPrivateSpec := dbv1alpha1.NetworkAccessSpec{ - AccessType: dbv1alpha1.NetworkAccessTypePrivate, - AccessControlList: []string{}, - IsMTLSConnectionRequired: common.Bool(isMTLSConnectionRequired), - PrivateEndpoint: dbv1alpha1.PrivateEndpointSpec{ - HostnamePrefix: 
adb.Spec.Details.DbName, - NsgOCIDs: []string{*nsgOCIDs}, - SubnetOCID: common.String(*subnetOCID), - }, - } - - TestNetworkAccess(k8sClient, dbClient, adbLookupKey, networkPrivateSpec)() + TestNetworkAccess(k8sClient, dbClient, adbLookupKey, subnetOCID, nsgOCIDs, nil, isMTLSConnectionRequired)() } } func TestNetworkAccessPublic(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName) func() { return func() { - networkPublicSpec := dbv1alpha1.NetworkAccessSpec{ - AccessType: dbv1alpha1.NetworkAccessTypePublic, - IsMTLSConnectionRequired: common.Bool(true), - } - - TestNetworkAccess(k8sClient, dbClient, adbLookupKey, networkPublicSpec)() + TestNetworkAccess(k8sClient, dbClient, adbLookupKey, nil, nil, nil, true)() } } -func TestNetworkAccess(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, networkSpec dbv1alpha1.NetworkAccessSpec) func() { +func TestNetworkAccess(k8sClient *client.Client, dbClient *database.DatabaseClient, adbLookupKey *types.NamespacedName, subnetOCID *string, nsgOCIDs *string, acl []string, isMTLSConnectionRequired bool) func() { return func() { Expect(k8sClient).NotTo(BeNil()) Expect(dbClient).NotTo(BeNil()) @@ -488,7 +467,10 @@ func TestNetworkAccess(k8sClient *client.Client, dbClient *database.DatabaseClie AssertADBState(k8sClient, dbClient, adbLookupKey, database.AutonomousDatabaseLifecycleStateAvailable)() Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) - adb.Spec.Details.NetworkAccess = networkSpec + adb.Spec.Details.SubnetId = subnetOCID + adb.Spec.Details.NsgIds = []string{*nsgOCIDs} + adb.Spec.Details.WhitelistedIps = acl + adb.Spec.Details.IsMtlsConnectionRequired = common.Bool(isMTLSConnectionRequired) Expect(derefK8sClient.Update(context.TODO(), adb)).To(Succeed()) AssertADBDetails(k8sClient, dbClient, adbLookupKey, adb)() } @@ -503,7 +485,7 @@ func UpdateAndAssertDetails(k8sClient *client.Client, dbClient 
*database.Databas expectedADB := UpdateDetails(k8sClient, dbClient, adbLookupKey, newSecretName, newAdminPassword)() AssertADBDetails(k8sClient, dbClient, adbLookupKey, expectedADB)() - ocid := expectedADB.Spec.Details.AutonomousDatabaseOCID + ocid := expectedADB.Spec.Details.Id tnsEntry := *expectedADB.Spec.Details.DbName + "_high" err := AssertAdminPassword(dbClient, ocid, &tnsEntry, newAdminPassword, walletPassword) Expect(err).ShouldNot(HaveOccurred()) @@ -547,7 +529,7 @@ func AssertHardLinkDelete(k8sClient *client.Client, dbClient *database.DatabaseC // Check every 10 secs for total 60 secs Eventually(func() (database.AutonomousDatabaseLifecycleStateEnum, error) { retryPolicy := e2eutil.NewLifecycleStateRetryPolicyADB(database.AutonomousDatabaseLifecycleStateTerminating) - return returnADBRemoteState(derefK8sClient, derefDBClient, adb.Spec.Details.AutonomousDatabaseOCID, &retryPolicy) + return returnADBRemoteState(derefK8sClient, derefDBClient, adb.Spec.Details.Id, &retryPolicy) }, changeTimeout).Should(Equal(database.AutonomousDatabaseLifecycleStateTerminating)) AssertSoftLinkDelete(k8sClient, adbLookupKey)() @@ -606,7 +588,7 @@ func AssertADBRemoteState(k8sClient *client.Client, dbClient *database.DatabaseC adb := &dbv1alpha1.AutonomousDatabase{} Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) By("Checking if the lifecycleState of remote resource is " + string(state)) - AssertADBRemoteStateOCID(k8sClient, dbClient, adb.Spec.Details.AutonomousDatabaseOCID, state, changeTimeout)() + AssertADBRemoteStateOCID(k8sClient, dbClient, adb.Spec.Details.Id, state, changeTimeout)() } } @@ -622,7 +604,7 @@ func AssertADBRemoteStateForBackupRestore(k8sClient *client.Client, dbClient *da adb := &dbv1alpha1.AutonomousDatabase{} Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) By("Checking if the lifecycleState of remote resource is " + string(state)) - AssertADBRemoteStateOCID(k8sClient, dbClient, 
adb.Spec.Details.AutonomousDatabaseOCID, state, backupTimeout)() + AssertADBRemoteStateOCID(k8sClient, dbClient, adb.Spec.Details.Id, state, backupTimeout)() } } @@ -655,8 +637,15 @@ func UpdateState(k8sClient *client.Client, adbLookupKey *types.NamespacedName, s adb := &dbv1alpha1.AutonomousDatabase{} Expect(derefK8sClient.Get(context.TODO(), *adbLookupKey, adb)).To(Succeed()) - adb.Spec.Details.LifecycleState = state By("Updating adb state to " + string(state)) + switch state { + case database.AutonomousDatabaseLifecycleStateAvailable: + adb.Spec.Action = "Start" + case database.AutonomousDatabaseLifecycleStateStopped: + adb.Spec.Action = "Stop" + case database.AutonomousDatabaseLifecycleStateTerminated: + adb.Spec.Action = "Terminate" + } Expect(derefK8sClient.Update(context.TODO(), adb)).To(Succeed()) } } diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index a9fa06e5..9d9914f7 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -62,7 +62,7 @@ import ( databasev1alpha1 "github.com/oracle/oracle-database-operator/apis/database/v1alpha1" controllers "github.com/oracle/oracle-database-operator/controllers/database" - "github.com/oracle/oracle-database-operator/test/e2e/util" + e2eutil "github.com/oracle/oracle-database-operator/test/e2e/util" // +kubebuilder:scaffold:imports ) @@ -309,9 +309,9 @@ var _ = AfterSuite(func() { By(fmt.Sprintf("Found %d AutonomousDatabase(s)", len(adbList.Items))) for _, adb := range adbList.Items { - if adb.Spec.Details.AutonomousDatabaseOCID != nil { + if adb.Spec.Details.Id != nil { By("Terminating database " + *adb.Spec.Details.DbName) - Expect(e2eutil.DeleteAutonomousDatabase(dbClient, adb.Spec.Details.AutonomousDatabaseOCID)).Should(Succeed()) + Expect(e2eutil.DeleteAutonomousDatabase(dbClient, adb.Spec.Details.Id)).Should(Succeed()) } } From ffb4024b00c32eec111d63b694f8fd17abac2630 Mon Sep 17 00:00:00 2001 From: matteo malvezzi Date: Thu, 27 Mar 2025 09:00:04 +0100 Subject: [PATCH 17/24] lrest doc 
correction (#165) Co-authored-by: oracle database --- docs/multitenant/lrest-based/README.md | 31 ++++++------- docs/multitenant/lrest-based/usecase/makefile | 44 +++++++++---------- 2 files changed, 38 insertions(+), 37 deletions(-) diff --git a/docs/multitenant/lrest-based/README.md b/docs/multitenant/lrest-based/README.md index f17abd41..d9b72a9d 100644 --- a/docs/multitenant/lrest-based/README.md +++ b/docs/multitenant/lrest-based/README.md @@ -274,10 +274,11 @@ Parsing sqltext=select count(*) from pdb_plug_in_violations where name =:b1 | Name | Dcription | --------------------------|-------------------------------------------------------------------------------| |cdbName | Name of the container database (db) | -|lrestImage (DO NOT EDIT) | **container-registry.oracle.com/database/lrest-dboper:latest** | -|dbTnsurl | TNS alias of the container db | +|lrestImage (DO NOT EDIT) | **container-registry.oracle.com/database/lrest-dboper:latest** use the latest label availble on OCR | +|dbTnsurl | The string of the tns alias to connect to cdb. 
Attention: remove all white space from string | |deletePdbCascade | Delete all of the PDBs associated to a CDB resource when the CDB resource is dropped using [imperative approach](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/imperative-command/) | |cdbAdminUser | Secret: the administrative (admin) user | +|fileNameConversions | Use file name conversion if you are not using ASM | |cdbAdminPwd | Secret: the admin user password | |webServerUser | Secret: the HTTPS user | |webServerPwd | Secret: the HTTPS user password | @@ -290,10 +291,10 @@ Parsing sqltext=select count(*) from pdb_plug_in_violations where name =:b1 ### Create PDB -To create a pluggable database (PDB), apply the yaml file [`create_lrpdb1_resource.yaml`](./usecase/clone_lrpdb1_resource.yaml) +To create a pluggable database, apply the yaml file [`create_pdb1_resource.yaml`](./usecase/create_pdb1_resource.yaml) ```bash -kubectl apply -f create_lrpdb1_resource.yaml +kubectl apply -f create_pdb1_resource.yaml ``` Check the status of the resource and the PDB existence on the container db: @@ -391,10 +392,10 @@ test_invalid_parameter;16;spfile ### Open PDB -To open the PDB, use the file [`open_lrpdb1_resource.yaml`](./usecase/open_lrpdb1_resource.yaml): +To open the PDB, use the file [`open_pdb1_resource.yaml`](./usecase/open_pdb1_resource.yaml): ```bash -kubectl apply -f open_lrpdb1_resource.yaml +kubectl apply -f open_pdb1_resource.yaml ``` **pdb opening** - parameters list @@ -411,10 +412,10 @@ kubectl apply -f open_lrpdb1_resource.yaml ### Close PDB -To close the PDB, use the file [`close_lrpdb1_resource.yaml`](./usecase/close_lrpdb1_resource.yaml): +To close the PDB, use the file [`close_pdb1_resource.yaml`](./usecase/close_pdb1_resource.yaml): ```bash -kubectl apply -f close_lrpdb1_resource.yaml +kubectl apply -f close_pdb1_resource.yaml ``` **pdb closing** - parameters list | Name | Description/Value | @@ -429,10 +430,10 @@ kubectl apply -f close_lrpdb1_resource.yaml ### Clone PDB ### 
-To clone the PDB, use the file [`clone_lrpdb1_resource.yaml`](./usecase/clone_lrpdb1_resource.yaml): +To clone the PDB, use the file [`clone_pdb1_resource.yaml`](./usecase/clone_pdb1_resource.yaml): ```bash -kubeclt apply -f clone_lrpdb1_resource.yaml +kubectl apply -f clone_pdb1_resource.yaml ``` **pdb cloning** - parameters list | Name | Description/Value | @@ -450,7 +451,7 @@ kubeclt apply -f clone_lrpdb1_resource.yaml ### Unplug PDB -To unplug the PDB, use the file [`unplug_lrpdb1_resource.yaml`](./usecase/unplug_lrpdb1_resource.yaml): +To unplug the PDB, use the file [`unplug_pdb1_resource.yaml`](./usecase/unplug_pdb1_resource.yaml): **pdb unplugging** | Name | Description/Value | @@ -461,7 +462,7 @@ To unplug the PDB, use the file [`unplug_lrpdb1_resource.yaml`](./usecase/unplug |pdbName | Name of the pluggable database (PDB)| ### Plug PDB -To plug in the PDB, use the file [`plug_lrpdb1_resource.yaml`](./usecase/plug_lrpdb1_resource.yaml). In this example, we plug in the PDB that was unpluged in the previous step: +To plug in the PDB, use the file [`plug_pdb1_resource.yaml`](./usecase/plug_pdb1_resource.yaml). 
In this example, we plug in the PDB that was unplugged in the previous step: **pdb plugging** | Name | Description/Value | @@ -478,7 +479,7 @@ To plug in the PDB, use the file [`plug_lrpdb1_resource.yaml`](./usecase/plug_lr ### Delete PDB -To delete the PDB, use the file [`delete_lrpdb1_resource.yaml`](./usecase/delete_lrpdb1_resource.yaml) +To delete the PDB, use the file [`delete_pdb1_resource.yaml`](./usecase/delete_pdb1_resource.yaml) **pdb deletion** @@ -493,8 +494,8 @@ To delete the PDB, use the file [`delete_lrpdb1_resource.yaml`](./usecase/delete ### Map PDB -If you need to create a CRD for an existing PDB, then you can use the map option by applying the file [`map_lrpdb1_resource.yaml`](./usecase/map_lrpdb1_resource.yaml) - +If you need to create a CRD for an existing PDB, then you can use the map option by applying the file [`map_pdb1_resource.yaml`](./usecase/map_pdb1_resource.yaml) +Map functionality can be used in a situation where you have a pdb which is not registered in the operator as a CRD. It's a temporary solution while waiting for the autodiscovery to be available.
diff --git a/docs/multitenant/lrest-based/usecase/makefile b/docs/multitenant/lrest-based/usecase/makefile index 4203baa4..1de320ad 100644 --- a/docs/multitenant/lrest-based/usecase/makefile +++ b/docs/multitenant/lrest-based/usecase/makefile @@ -31,27 +31,27 @@ # # ----------------------------- ---------------------------------- # oracle-database-operator.yaml : oracle database operator -# cdbnamespace_binding.yaml : role binding for cdbnamespace +# lrestnamespace_binding.yaml : role binding for lrestnamespace # pdbnamespace_binding.yaml : role binding for pdbnamespace # create_lrest_secret.yaml : create secrets for rest server pod # create_lrpdb_secret.yaml : create secrets for pluggable database # create_lrest_pod.yaml : create rest server pod -# create_pdb1_resource.yaml : create first pluggable database -# create_pdb2_resource.yaml : create second pluggable database -# open_pdb1_resource.yaml : open first pluggable database -# open_pdb2_resource.yaml : open second pluggable database -# close_pdb1_resource.yaml : close first pluggable database -# close_pdb2_resource.yaml : close second pluggable database +# create_pdb1_resource.yaml : create first pluggable database +# create_pdb2_resource.yaml : create second pluggable database +# open_pdb1_resource.yaml : open first pluggable database +# open_pdb2_resource.yaml : open second pluggable database +# close_pdb1_resource.yaml : close first pluggable database +# close_pdb2_resource.yaml : close second pluggable database # clone_lrpdb_resource.yaml : clone thrid pluggable database -# clone_pdb2_resource.yaml : clone 4th pluggable database -# delete_pdb1_resource.yaml : delete first pluggable database -# delete_pdb2_resource.yaml : delete sencond pluggable database -# delete_pdb3_resource.yaml : delete thrid pluggable database -# unplug_pdb1_resource.yaml : unplug first pluggable database -# plug_pdb1_resource.yaml : plug first pluggable database -# map_pdb1_resource.yaml : map the first pluggable database +# 
clone_pdb2_resource.yaml : clone 4th pluggable database +# delete_pdb1_resource.yaml : delete first pluggable database +# delete_pdb2_resource.yaml : delete sencond pluggable database +# delete_pdb3_resource.yaml : delete thrid pluggable database +# unplug_pdb1_resource.yaml : unplug first pluggable database +# plug_pdb1_resource.yaml : plug first pluggable database +# map_pdb1_resource.yaml : map the first pluggable database # config_map.yam : pdb parameters array -# altersystem_pdb1_resource.yaml : chage cpu_count count parameter for the first pdb +# altersystem_pdb1_resource.yaml : chage cpu_count count parameter for the first pdb # DATE := `date "+%y%m%d%H%M%S"` ###################### @@ -328,7 +328,7 @@ apiVersion: database.oracle.com/${APIVERSION} kind: LREST metadata: name: cdb-dev - namespace: cdbnamespace + namespace: ${LRSNAMESPACE} spec: cdbName: "DB12" lrestImage: ${LRESTIMG} @@ -524,7 +524,7 @@ spec: cdbResName: "cdb-dev" cdbNamespace: "${LRSNAMESPACE}" cdbName: "DB12" - pdbName: ""new_clone" + pdbName: "new_clone" pdbState: "CLOSE" modifyOption: "IMMEDIATE" action: "Modify" @@ -619,7 +619,7 @@ spec: cdbNamespace: "${LRSNAMESPACE}" cdbName: "DB12" pdbName: "pdbdev" - xmlFileName: "/tmp/pdb.xml" + xmlFileName: "/var/tmp/pdb.$$.xml" action: "Unplug" EOF @@ -636,7 +636,7 @@ spec: cdbNamespace: "${LRSNAMESPACE}" cdbName: "DB12" pdbName: "pdbdev" - xmlFileName: "/tmp/pdb.xml" + xmlFileName: "/var/tmp/pdb.$$.xml" action: "plug" fileNameConversions: "NONE" sourceFileNameConversions: "NONE" @@ -901,10 +901,10 @@ run07.1: $(KUBECTL) get lrpdb -n $(PDBNAMESPACE) run99.1: - $(KUBECTL) delete lrest cdb-dev -n cdbnamespace + $(KUBECTL) delete lrest cdb-dev -n $(LRSNAMESPACE) $(KUBECTL) wait --for=delete lrest cdb-dev -n $(LRSNAMESPACE) --timeout=$(TEST_EXEC_TIMEOUT) - $(KUBECTL) get lrest -n cdbnamespaace - $(KUBECTL) get lrpdb -n pdbnamespaace + $(KUBECTL) get lrest -n $(LRSNAMESPACE) + $(KUBECTL) get lrpdb -n $(PDBNAMESPACE) runall01: run00 run01.1 run01.2 
run02.1 run02.2 run03.1 run03.2 run04.1 run04.2 run05.1 run06.1 run07.1 From d4edab26470d7435846c3224a16b6b07ec7f14e2 Mon Sep 17 00:00:00 2001 From: matteo malvezzi Date: Sat, 29 Mar 2025 22:13:29 +0100 Subject: [PATCH 18/24] V1 cdb (#167) * cdb_types v1alpha1 placeholder * pdb types v1alpha1 placeholder --------- Co-authored-by: oracle database --- apis/database/v1alpha1/cdb_types.go | 190 ++++++++++++++++++++++ apis/database/v1alpha1/pdb_types.go | 236 ++++++++++++++++++++++++++++ 2 files changed, 426 insertions(+) create mode 100644 apis/database/v1alpha1/cdb_types.go create mode 100644 apis/database/v1alpha1/pdb_types.go diff --git a/apis/database/v1alpha1/cdb_types.go b/apis/database/v1alpha1/cdb_types.go new file mode 100644 index 00000000..f97df391 --- /dev/null +++ b/apis/database/v1alpha1/cdb_types.go @@ -0,0 +1,190 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and 
to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CDBSpec defines the desired state of CDB +type CDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Name of the CDB + CDBName string `json:"cdbName,omitempty"` + // Name of the CDB Service + ServiceName string `json:"serviceName,omitempty"` + + // Password for the CDB System Administrator + SysAdminPwd CDBSysAdminPassword `json:"sysAdminPwd,omitempty"` + // User in the root container with sysdba priviledges to manage PDB lifecycle + CDBAdminUser CDBAdminUser `json:"cdbAdminUser,omitempty"` + // Password for the CDB Administrator to manage PDB lifecycle + CDBAdminPwd CDBAdminPassword `json:"cdbAdminPwd,omitempty"` + + CDBTlsKey CDBTLSKEY `json:"cdbTlsKey,omitempty"` + CDBTlsCrt CDBTLSCRT `json:"cdbTlsCrt,omitempty"` + + // Password for user ORDS_PUBLIC_USER + ORDSPwd ORDSPassword `json:"ordsPwd,omitempty"` + // ORDS server port. For now, keep it as 8888. TO BE USED IN FUTURE RELEASE. 
+ ORDSPort int `json:"ordsPort,omitempty"` + // ORDS Image Name + ORDSImage string `json:"ordsImage,omitempty"` + // The name of the image pull secret in case of a private docker repository. + ORDSImagePullSecret string `json:"ordsImagePullSecret,omitempty"` + // ORDS Image Pull Policy + // +kubebuilder:validation:Enum=Always;Never + ORDSImagePullPolicy string `json:"ordsImagePullPolicy,omitempty"` + // Number of ORDS Containers to create + Replicas int `json:"replicas,omitempty"` + // Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints + WebServerUser WebServerUser `json:"webServerUser,omitempty"` + // Password for the Web Server User + WebServerPwd WebServerPassword `json:"webServerPwd,omitempty"` + // Name of the DB server + DBServer string `json:"dbServer,omitempty"` + // DB server port + DBPort int `json:"dbPort,omitempty"` + // Node Selector for running the Pod + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + DeletePDBCascade bool `json:"deletePdbCascade,omitempty"` + DBTnsurl string `json:"dbTnsurl,omitempty"` + CDBPubKey CDBPUBKEY `json:"cdbOrdsPubKey,omitempty"` + CDBPriKey CDBPRIVKEY `json:"cdbOrdsPrvKey,omitempty"` +} + +// CDBSecret defines the secretName +type CDBSecret struct { + SecretName string `json:"secretName"` + Key string `json:"key"` +} + +// CDBSysAdminPassword defines the secret containing SysAdmin Password mapped to key 'sysAdminPwd' for CDB +type CDBSysAdminPassword struct { + Secret CDBSecret `json:"secret"` +} + +// CDBAdminUser defines the secret containing CDB Administrator User mapped to key 'cdbAdminUser' to manage PDB lifecycle +type CDBAdminUser struct { + Secret CDBSecret `json:"secret"` +} + +// CDBAdminPassword defines the secret containing CDB Administrator Password mapped to key 'cdbAdminPwd' to manage PDB lifecycle +type CDBAdminPassword struct { + Secret CDBSecret `json:"secret"` +} + +// ORDSPassword defines the secret containing 
ORDS_PUBLIC_USER Password mapped to key 'ordsPwd' +type ORDSPassword struct { + Secret CDBSecret `json:"secret"` +} + +// WebServerUser defines the secret containing Web Server User mapped to key 'webServerUser' to manage PDB lifecycle +type WebServerUser struct { + Secret CDBSecret `json:"secret"` +} + +// WebServerPassword defines the secret containing password for Web Server User mapped to key 'webServerPwd' to manage PDB lifecycle +type WebServerPassword struct { + Secret CDBSecret `json:"secret"` +} + +type CDBTLSKEY struct { + Secret CDBSecret `json:"secret"` +} + +type CDBTLSCRT struct { + Secret CDBSecret `json:"secret"` +} + +type CDBPUBKEY struct { + Secret CDBSecret `json:"secret"` +} + +type CDBPRIVKEY struct { + Secret CDBSecret `json:"secret"` +} + +// CDBStatus defines the observed state of CDB +type CDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Phase of the CDB Resource + Phase string `json:"phase"` + // CDB Resource Status + Status bool `json:"status"` + // Message + Msg string `json:"msg,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB" +// +kubebuilder:printcolumn:JSONPath=".spec.dbServer",name="DB Server",type="string",description=" Name of the DB Server" +// +kubebuilder:printcolumn:JSONPath=".spec.dbPort",name="DB Port",type="integer",description="DB server port" +// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="Replicas",type="integer",description="Replicas" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the CDB Resource" +// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any" +// +kubebuilder:printcolumn:JSONPath=".spec.dbTnsurl",name="TNS 
STRING",type="string",description=" string of the tnsalias" +// +kubebuilder:resource:path=cdbs,scope=Namespaced + +// CDB is the Schema for the cdbs API +type CDB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CDBSpec `json:"spec,omitempty"` + Status CDBStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// CDBList contains a list of CDB +type CDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CDB `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CDB{}, &CDBList{}) +} diff --git a/apis/database/v1alpha1/pdb_types.go b/apis/database/v1alpha1/pdb_types.go new file mode 100644 index 00000000..8b966c38 --- /dev/null +++ b/apis/database/v1alpha1/pdb_types.go @@ -0,0 +1,236 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. +** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense 
the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. + */ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PDBSpec defines the desired state of PDB +type PDBSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + PDBTlsKey PDBTLSKEY `json:"pdbTlsKey,omitempty"` + PDBTlsCrt PDBTLSCRT `json:"pdbTlsCrt,omitempty"` + PDBTlsCat PDBTLSCAT `json:"pdbTlsCat,omitempty"` + + // CDB Namespace + CDBNamespace string `json:"cdbNamespace,omitempty"` + // Name of the CDB Custom Resource that runs the ORDS container + CDBResName string `json:"cdbResName,omitempty"` + // Name of the CDB + CDBName string `json:"cdbName,omitempty"` + // The name of the new PDB. Relevant for both Create and Plug Actions. + PDBName string `json:"pdbName,omitempty"` + // Name of the Source PDB from which to clone + SrcPDBName string `json:"srcPdbName,omitempty"` + // The administrator username for the new PDB. This property is required when the Action property is Create. + AdminName PDBAdminName `json:"adminName,omitempty"` + // The administrator password for the new PDB. This property is required when the Action property is Create. 
+ AdminPwd PDBAdminPassword `json:"adminPwd,omitempty"` + // Web Server User with SQL Administrator role to allow us to authenticate to the PDB Lifecycle Management REST endpoints + WebServerUsr WebServerUserPDB `json:"webServerUser,omitempty"` + // Password for the Web ServerPDB User + WebServerPwd WebServerPasswordPDB `json:"webServerPwd,omitempty"` + // Relevant for Create and Plug operations. As defined in the Oracle Multitenant Database documentation. Values can be a filename convert pattern or NONE. + FileNameConversions string `json:"fileNameConversions,omitempty"` + // This property is required when the Action property is Plug. As defined in the Oracle Multitenant Database documentation. Values can be a source filename convert pattern or NONE. + SourceFileNameConversions string `json:"sourceFileNameConversions,omitempty"` + // XML metadata filename to be used for Plug or Unplug operations + XMLFileName string `json:"xmlFileName,omitempty"` + // To copy files or not while cloning a PDB + // +kubebuilder:validation:Enum=COPY;NOCOPY;MOVE + CopyAction string `json:"copyAction,omitempty"` + // Specify if datafiles should be removed or not. The value can be INCLUDING or KEEP (default). + // +kubebuilder:validation:Enum=INCLUDING;KEEP + DropAction string `json:"dropAction,omitempty"` + // A Path specified for sparse clone snapshot copy. (Optional) + SparseClonePath string `json:"sparseClonePath,omitempty"` + // Whether to reuse temp file + ReuseTempFile *bool `json:"reuseTempFile,omitempty"` + // Relevant for Create and Plug operations. True for unlimited storage. Even when set to true, totalSize and tempSize MUST be specified in the request if Action is Create. + UnlimitedStorage *bool `json:"unlimitedStorage,omitempty"` + // Indicate if 'AS CLONE' option should be used in the command to plug in a PDB. This property is applicable when the Action property is PLUG but not required. 
+ AsClone *bool `json:"asClone,omitempty"` + // Relevant for create and plug operations. Total size as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. + TotalSize string `json:"totalSize,omitempty"` + // Relevant for Create and Clone operations. Total size for temporary tablespace as defined in the Oracle Multitenant Database documentation. See size_clause description in Database SQL Language Reference documentation. + TempSize string `json:"tempSize,omitempty"` + // TDE import for plug operations + TDEImport *bool `json:"tdeImport,omitempty"` + // TDE export for unplug operations + TDEExport *bool `json:"tdeExport,omitempty"` + // TDE password if the tdeImport or tdeExport flag is set to true. Can be used in create, plug or unplug operations + TDEPassword TDEPwd `json:"tdePassword,omitempty"` + // TDE keystore path is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations. + TDEKeystorePath string `json:"tdeKeystorePath,omitempty"` + // TDE secret is required if the tdeImport or tdeExport flag is set to true. Can be used in plug or unplug operations. + TDESecret TDESecret `json:"tdeSecret,omitempty"` + // Whether you need the script only or execute the script + GetScript *bool `json:"getScript,omitempty"` + // Action to be taken: Create/Clone/Plug/Unplug/Delete/Modify/Status/Map. Map is used to map a Databse PDB to a Kubernetes PDB CR. 
+ // +kubebuilder:validation:Enum=Create;Clone;Plug;Unplug;Delete;Modify;Status;Map + Action string `json:"action"` + // Extra options for opening and closing a PDB + // +kubebuilder:validation:Enum=IMMEDIATE;NORMAL;READ ONLY;READ WRITE;RESTRICTED + ModifyOption string `json:"modifyOption,omitempty"` + // The target state of the PDB + // +kubebuilder:validation:Enum=OPEN;CLOSE + PDBState string `json:"pdbState,omitempty"` + // turn on the assertive approach to delete pdb resource + // kubectl delete pdb ..... automatically triggers the pluggable database + // deletion + AssertivePdbDeletion bool `json:"assertivePdbDeletion,omitempty"` + PDBPubKey PDBPUBKEY `json:"pdbOrdsPubKey,omitempty"` + PDBPriKey PDBPRIVKEY `json:"pdbOrdsPrvKey,omitempty"` +} + +// PDBAdminName defines the secret containing Sys Admin User mapped to key 'adminName' for PDB +type PDBAdminName struct { + Secret PDBSecret `json:"secret"` +} + +// PDBAdminPassword defines the secret containing Sys Admin Password mapped to key 'adminPwd' for PDB +type PDBAdminPassword struct { + Secret PDBSecret `json:"secret"` +} + +// TDEPwd defines the secret containing TDE Wallet Password mapped to key 'tdePassword' for PDB +type TDEPwd struct { + Secret PDBSecret `json:"secret"` +} + +// TDESecret defines the secret containing TDE Secret to key 'tdeSecret' for PDB +type TDESecret struct { + Secret PDBSecret `json:"secret"` +} + +// WebServerUser defines the secret containing Web Server User mapped to key 'webServerUser' to manage PDB lifecycle + +type WebServerUserPDB struct { + Secret PDBSecret `json:"secret"` +} + +// WebServerPassword defines the secret containing password for Web Server User mapped to key 'webServerPwd' to manage PDB lifecycle +type WebServerPasswordPDB struct { + Secret PDBSecret `json:"secret"` +} + +// PDBSecret defines the secretName +type PDBSecret struct { + SecretName string `json:"secretName"` + Key string `json:"key"` +} + +type PDBTLSKEY struct { + Secret PDBSecret `json:"secret"` 
+} + +type PDBTLSCRT struct { + Secret PDBSecret `json:"secret"` +} + +type PDBTLSCAT struct { + Secret PDBSecret `json:"secret"` +} + +// PDBStatus defines the observed state of PDB +type PDBStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // PDB Connect String + ConnString string `json:"connString,omitempty"` + // Phase of the PDB Resource + Phase string `json:"phase"` + // PDB Resource Status + Status bool `json:"status"` + // Total size of the PDB + TotalSize string `json:"totalSize,omitempty"` + // Open mode of the PDB + OpenMode string `json:"openMode,omitempty"` + // Modify Option of the PDB + ModifyOption string `json:"modifyOption,omitempty"` + // Message + Msg string `json:"msg,omitempty"` + // Last Completed Action + Action string `json:"action,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=".spec.cdbName",name="CDB Name",type="string",description="Name of the CDB" +// +kubebuilder:printcolumn:JSONPath=".spec.pdbName",name="PDB Name",type="string",description="Name of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.openMode",name="PDB State",type="string",description="PDB Open Mode" +// +kubebuilder:printcolumn:JSONPath=".status.totalSize",name="PDB Size",type="string",description="Total Size of the PDB" +// +kubebuilder:printcolumn:JSONPath=".status.phase",name="Status",type="string",description="Status of the PDB Resource" +// +kubebuilder:printcolumn:JSONPath=".status.msg",name="Message",type="string",description="Error message, if any" +// +kubebuilder:printcolumn:JSONPath=".status.connString",name="Connect_String",type="string",description="The connect string to be used" +// +kubebuilder:resource:path=pdbs,scope=Namespaced + +// PDB is the Schema for the pdbs API +type PDB struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta 
`json:"metadata,omitempty"` + + Spec PDBSpec `json:"spec,omitempty"` + Status PDBStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// PDBList contains a list of PDB +type PDBList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PDB `json:"items"` +} + +type PDBPUBKEY struct { + Secret PDBSecret `json:"secret"` +} + +type PDBPRIVKEY struct { + Secret PDBSecret `json:"secret"` +} + +func init() { + SchemeBuilder.Register(&PDB{}, &PDBList{}) +} From ba8419e3464d43115c823cc3dd0487c2652dc341 Mon Sep 17 00:00:00 2001 From: jpverma85 Date: Wed, 2 Apr 2025 13:06:38 -0400 Subject: [PATCH 19/24] doc changes (#172) --- docs/sharding/README.md | 76 ++----------------- .../create_kubernetes_secret_for_db_user.md | 2 + .../sharding_provisioning_with_free_images.md | 5 +- ...harding_provisioning_with_free_images.yaml | 4 +- ...y_cloning_db_from_gold_image_across_ads.md | 3 +- ...ing_by_cloning_db_gold_image_in_same_ad.md | 3 +- ...ding_provisioning_with_chunks_specified.md | 3 +- ..._provisioning_with_control_on_resources.md | 3 +- ...ith_notification_using_oci_notification.md | 3 +- ...ding_provisioning_without_db_gold_image.md | 3 +- ...rding_scale_in_delete_an_existing_shard.md | 3 +- .../snr_ssharding_scale_out_add_shards.md | 3 +- .../snr_ssharding_shard_prov.yaml | 3 +- .../snr_ssharding_shard_prov_chunks.yaml | 3 +- .../snr_ssharding_shard_prov_clone.yaml | 3 +- ...ssharding_shard_prov_clone_across_ads.yaml | 3 +- .../snr_ssharding_shard_prov_delshard.yaml | 3 +- .../snr_ssharding_shard_prov_extshard.yaml | 3 +- .../snr_ssharding_shard_prov_memory_cpu.yaml | 3 +- ...sharding_shard_prov_send_notification.yaml | 3 +- ...y_cloning_db_from_gold_image_across_ads.md | 1 + ...ing_by_cloning_db_gold_image_in_same_ad.md | 1 + ...ding_provisioning_with_chunks_specified.md | 1 + ..._provisioning_with_control_on_resources.md | 1 + ...ith_notification_using_oci_notification.md | 1 + 
...ding_provisioning_without_db_gold_image.md | 2 +- ...rding_scale_in_delete_an_existing_shard.md | 1 + .../ssharding_scale_out_add_shards.md | 1 + .../system_sharding/ssharding_shard_prov.yaml | 4 +- .../ssharding_shard_prov_chunks.yaml | 4 +- .../ssharding_shard_prov_clone.yaml | 4 +- ...ssharding_shard_prov_clone_across_ads.yaml | 4 +- .../ssharding_shard_prov_delshard.yaml | 4 +- .../ssharding_shard_prov_extshard.yaml | 4 +- .../ssharding_shard_prov_memory_cpu.yaml | 4 +- ...sharding_shard_prov_send_notification.yaml | 4 +- ...y_cloning_db_from_gold_image_across_ads.md | 1 + ...ing_by_cloning_db_gold_image_in_same_ad.md | 1 + ..._provisioning_with_control_on_resources.md | 1 + ...ith_notification_using_oci_notification.md | 1 + ...ding_provisioning_without_db_gold_image.md | 1 + ...rding_scale_in_delete_an_existing_shard.md | 1 + .../udsharding_scale_out_add_shards.md | 1 + .../udsharding_shard_prov.yaml | 4 +- .../udsharding_shard_prov_clone.yaml | 4 +- ...dsharding_shard_prov_clone_across_ads.yaml | 4 +- .../udsharding_shard_prov_delshard.yaml | 4 +- .../udsharding_shard_prov_extshard.yaml | 4 +- .../udsharding_shard_prov_memory_cpu.yaml | 4 +- ...sharding_shard_prov_send_notification.yaml | 4 +- 50 files changed, 91 insertions(+), 120 deletions(-) diff --git a/docs/sharding/README.md b/docs/sharding/README.md index 661e2546..487d9ec3 100644 --- a/docs/sharding/README.md +++ b/docs/sharding/README.md @@ -72,7 +72,7 @@ Choose one of the following deployment options: **Use Oracle-Supplied Docker Images:** The Oracle Sharding Database controller uses Oracle Global Data Services and Oracle Database images to provision the sharding topology. - You can also download the pre-built Oracle Global Data Services `container-registry.oracle.com/database/gsm:latest` and Oracle Database images `container-registry.oracle.com/database/enterprise:latest` from [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). 
These images are functionally tested and evaluated with various use cases of Oracle Globally Distributed Database topology by deploying on OKE and OLCNE. + You can also download the pre-built Oracle Global Data Services and Oracle Database images from [Oracle Container Registry](https://container-registry.oracle.com/ords/f?p=113:10::::::). These images are functionally tested and evaluated with various use cases of Oracle Globally Distributed Database topology by deploying on OKE and OLCNE. You can refer to [Oracle Container Registry Images for Oracle Globally Distributed Database Deployment](https://github.com/oracle/db-sharding/blob/master/container-based-sharding-deployment/README.md#oracle-container-registry-images-for-oracle-globally-distributed-database-deployment) **Note:** You will need to accept Agreement from container-registry.orcale.com to be able to pull the pre-built container images. @@ -80,7 +80,7 @@ Choose one of the following deployment options: **Build your own Oracle Database and Global Data Services Docker Images:** You can build these images using instructions provided on Oracle official GitHub Repositories: - * [Oracle Global Data Services Image](https://github.com/oracle/db-sharding/tree/master/docker-based-sharding-deployment/dockerfiles) + * [Oracle Global Data Services Image](https://github.com/oracle/db-sharding/tree/master/container-based-sharding-deployment) * [Oracle Database Image](https://github.com/oracle/docker-images/tree/main/OracleDatabase/SingleInstance) After the images are ready, push them to your Docker Images Repository, so that you can pull them during Oracle Globally Distributed Database topology provisioning. @@ -91,8 +91,6 @@ You can either download the images and push them to your Docker Images Repositor **Note:** In case you want to use the `Oracle Database 23ai Free` Image for Database and GSM, refer to section [Oracle Database 23ai Free](#oracle-database-23ai-free) for more details. -### 4. 
Create a namespace for the Oracle DB Sharding Setup - ### 4. Create a namespace for the Oracle Globally Distributed Database Setup Create a Kubernetes namespace named `shns`. All the resources belonging to the Oracle Globally Distributed Database Topology Setup will be provisioned in this namespace named `shns`. For example: @@ -107,6 +105,8 @@ You can either download the images and push them to your Docker Images Repositor ### 5. Create a Kubernetes secret for the database installation owner for the Oracle Globally Distributed Database Topology Deployment +**IMPORTANT:** Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. If you want to use Prebuilt Oracle Database and Oracle GSM Images from Oracle Container Registry for your deployment, you can refer to [Oracle Container Registry Images for Oracle Globally Distributed Database Deployment](https://github.com/oracle/db-sharding/blob/master/container-based-sharding-deployment/README.md#oracle-container-registry-images-for-oracle-globally-distributed-database-deployment) + Create a Kubernetes secret named `db-user-pass-rsa` using these steps: [Create Kubernetes Secret](./provisioning/create_kubernetes_secret_for_db_user.md) After you have the above prerequisites completed, you can proceed to the next section for your environment to provision the Oracle Database Sharding Topology. @@ -119,71 +119,6 @@ In case of an `OCI OKE` cluster, you can use this Persistent Volume during provi You can refer [here](./provisioning/provisioning_persistent_volume_having_db_gold_image.md) for the steps involved. -**NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
So, this step will not be needed if you are deploying Oracle Sharded Database using Oracle 23ai Free Database and GSM Images. - -## Oracle Database 23ai Free - -Please refer to [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) documentation for more details. - -If you want to use Oracle Database 23ai Free Image for Database and GSM for deployment of the Sharded Database using Sharding Controller in Oracle Database Kubernetes Operator, you need to consider the below points: - -* To deploy using the FREE Database and GSM Image, you will need to add the additional parameter `dbEdition: "free"` to the .yaml file. -* Refer to [Sample Sharded Database Deployment using Oracle 23ai FREE Database and GSM Images](./provisioning/free/sharding_provisioning_with_free_images.md) for an example. -* For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. -* Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. -* Total number of chunks for FREE Database defaults to `12` if `CATALOG_CHUNKS` parameter is not specified. This default value is determined considering limitation of 12 GB of user data on disk for oracle free database. - - -## Provisioning Sharding Topology with System-Managed Sharding in a Cloud-Based Kubernetes Cluster - -Deploy Oracle Database Sharding Topology with `System-Managed Sharding` on your Cloud based Kubernetes cluster. - -In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: - -[1. 
Provisioning Oracle Sharded Database with System-Managed Sharding without Database Gold Image](./provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md) -[2. Provisioning Oracle Sharded Database with System-Managed Sharding with number of chunks specified](./provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md) -[3. Provisioning Oracle Sharded Database with System-Managed Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md) -[4. Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) -[5. Provisioning Oracle Sharded Database with System-Managed Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) -[6. Provisioning Oracle Sharded Database with System-Managed Sharding and send Notification using OCI Notification Service](./provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md) -[7. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_out_add_shards.md) -[8. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding](./provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md) - - -## Provisioning Sharding Topology with User Defined Sharding in a Cloud-Based Kubernetes Cluster - -Deploy Oracle Database Sharding Topology with `User Defined Sharding` on your Cloud based Kubernetes cluster. 
- -In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: - -[1. Provisioning Oracle Sharded Database with User Defined Sharding without Database Gold Image](./provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md) -[2. Provisioning Oracle Sharded Database with User Defined Sharding with additional control on resources like Memory and CPU allocated to Pods](./provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md) -[3. Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) -[4. Provisioning Oracle Sharded Database with User Defined Sharding by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) -[5. Provisioning Oracle Sharded Database with User Defined Sharding and send Notification using OCI Notification Service](./provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md) -[6. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with User Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md) -[7. 
Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with User Defined Sharding](./provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md) - - -## Provisioning System-Managed Sharding Topology with Raft replication enabled in a Cloud-Based Kubernetes Cluster - -Deploy Oracle Database Sharding Topology with `System-Managed Sharding with SNR RAFT enabled` on your Cloud based Kubernetes cluster. - -**NOTE: SNR RAFT Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** - -In this example, the deployment uses the YAML file based on `OCI OKE` cluster. There are multiple use case possible for deploying the Oracle Database Sharding Topology covered by below examples: - -[1. Provisioning System-Managed Sharding Topology with Raft replication enabled without Database Gold Image](./provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md) -[2. Provisioning System-Managed Sharding Topology with Raft replication enabled with number of chunks specified](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md) -[3. Provisioning System-Managed Sharding Topology with Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md) -[4. Provisioning System-Managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md) -[5. Provisioning System-Managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs)](./provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md) -[6. 
Provisioning System-Managed Sharding Topology with Raft replication enabled and send Notification using OCI Notification Service](./provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md) -[7. Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md) -[8. Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT reolication enabled](./provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md) - -You can refer [here](./provisioning/provisioning_persistent_volume_having_db_gold_image.md) for the steps involved. - **NOTE:** Provisioning the Oracle Globally Distributed Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. So, this step will not be needed if you are deploying Oracle Globally Distributed Database using Oracle 23ai Free Database and GSM Images. ## Oracle Database 23ai Free @@ -262,4 +197,5 @@ To debug the Oracle Globally Distributed Database Topology provisioned using the * For both ENTERPRISE and FREE Images, if the CATALOG Database Pod is stopped from the worker node using the command `crictl stopp`, then it can leave the CATALOG in an error state. This error state results in GSM reporting the error message **GSM-45034: Connection to GDS catalog is not established.** * For both ENTERPRISE and FREE Images, either restart of node running the SHARD Pod using `/sbin/reboot -f` or stopping the Shard Database Pod from the worker node using `crictl stopp` command can leave the shard in an error state. * For both ENTERPRISE and FREE Images, after force restarts of the node running GSM Pod, the GSM pod restarts multiple times, and then becomes stable. 
The GSM pod restarts itself because when the worker node comes up, the GSM pod is recreated, but does not obtain DB connection to the Catalog. The Liveness Probe fails which restarts the Pod. Be aware of this issue, and permit the GSM pod to become stable. -* **DDL Propagation from Catalog to Shards:** DDL Propagation from the Catalog Database to the Shard Databases can take several minutes to complete. To see faster propagation of DDLs such as the tablespace set from the Catalog Database to the Shard Databases, Oracle recommends that you set smaller chunk values by using the `CATALOG_CHUNKS` attribute in the .yaml file while creating the Sharded Database Topology. \ No newline at end of file +* **DDL Propagation from Catalog to Shards:** DDL Propagation from the Catalog Database to the Shard Databases can take several minutes to complete. To see faster propagation of DDLs such as the tablespace set from the Catalog Database to the Shard Databases, Oracle recommends that you set smaller chunk values by using the `CATALOG_CHUNKS` attribute in the .yaml file while creating the Sharded Database Topology. +* If the version of `openssl` used to create the encrypted password file for Kubernetes secrets is not compatible with the openssl verion of the Oracle Database and Oracle GSM Image, then you can get the error `OS command returned code : 1, returned error : bad magic number` in the logs of the Database or GSM Pod. In this case, during the deployment, openssl will not be able to decrypt the encrypted password file and the deployment will not complete. 
\ No newline at end of file diff --git a/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md b/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md index 744f972c..db534575 100644 --- a/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md +++ b/docs/sharding/provisioning/create_kubernetes_secret_for_db_user.md @@ -8,6 +8,8 @@ Use the following steps to create an encrypted file with a password for the DB U - Remove the initial text file. - Create the Kubernetes Secret named `db-user-pass-rsa` using the encrypted file. +**IMPORTANT:** Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. + To understand how to create your own file, use the following example: ```sh diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md index 61641312..0425920b 100644 --- a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md +++ b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.md @@ -19,8 +19,11 @@ This example uses `sharding_provisioning_with_free_images.yaml` to provision an To get the Oracle 23ai FREE Database and GSM Images: * The Oracle 23ai FREE RDBMS Image used is `container-registry.oracle.com/database/free:latest`. Check [Oracle Database Free Get Started](https://www.oracle.com/database/free/get-started/?source=v0-DBFree-ChatCTA-j2032-20240709) for details. * To pull the above image from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. 
- * Use the Oracle 23ai FREE GSM Binaries `LINUX.X64_234000_gsm.zip` as listed on page [Oracle Database Free Get Started](https://www.oracle.com/database/free/get-started/?source=v0-DBFree-ChatCTA-j2032-20240709) and prepare the GSM Container Image following [Oracle Global Data Services Image](https://github.com/oracle/db-sharding/tree/master/docker-based-sharding-deployment/dockerfiles) + * The the Oracle 23ai FREE GSM Image used is `container-registry.oracle.com/database/gsm:latest`. + * To pull the above image from Oracle Container Registry, create a Kubernetes secret named `ocr-reg-cred` using your credentials with type set to `kubernetes.io/dockerconfigjson` in the namespace `shns`. * You need to change `dbImage` and `gsmImage` tag with the images you want to use in your enviornment in file `sharding_provisioning_with_free_images.yaml`. + +**IMPORTANT:** Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. 
diff --git a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml index 954ede63..dadd619a 100644 --- a/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml +++ b/docs/sharding/provisioning/free/sharding_provisioning_with_free_images.yaml @@ -41,8 +41,8 @@ spec: storageClass: oci dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred - gsmImage: - gsmImagePullSecret: + gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImagePullSecret: ocr-reg-cred dbEdition: "free" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index ba72be25..9ffebad9 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -1,6 +1,6 @@ # Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image across Availability Domains(ADs) -**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
@@ -40,6 +40,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone_across_ads.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. Use the file: [snr_ssharding_shard_prov_clone_across_ads.yaml](./snr_ssharding_shard_prov_clone_across_ads.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index cf4240f7..054d760e 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -1,6 +1,6 @@ # Provisioning System managed Sharding Topology with Raft replication enabled by cloning database from your own Database Gold Image in the same Availability Domain(AD) -**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** 
**IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -34,6 +34,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_clone.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. **NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md index 44972090..253d099b 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_chunks_specified.md @@ -1,6 +1,6 @@ # Provisioning System-Managed Sharding Topology with Raft replication enabled with number of chunks specified -**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -25,6 +25,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
+ * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. Use the file: [snr_ssharding_shard_prov_chunks.yaml](./snr_ssharding_shard_prov_chunks.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md index 9cfd6afb..e017f6a9 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_control_on_resources.md @@ -1,6 +1,6 @@ # Provisioning System-Managed Sharding Topology with Raft replication enabled with additional control on resources like Memory and CPU allocated to Pods -**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -21,6 +21,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_memory_cpu.yaml`. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. **NOTE:** For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md index d4cb11de..3b8d1665 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_with_notification_using_oci_notification.md @@ -1,6 +1,6 @@ # Provisioning System managed Sharding Topology with Raft replication enabled and send Notification using OCI Notification Service -**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database 
Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -69,6 +69,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_send_notification.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. **NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md index 892741a5..91caddf1 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_provisioning_without_db_gold_image.md @@ -1,6 +1,6 @@ # Provisioning System-Managed Sharding Topology with Raft replication enabled without Database Gold Image -**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -22,6 +22,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
+ * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. Use the file: [snr_ssharding_shard_prov.yaml](./snr_ssharding_shard_prov.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md index fe3157ec..fc093654 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_in_delete_an_existing_shard.md @@ -1,6 +1,6 @@ # Scale In - Delete an existing Shard from a working Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT reolication enabled -**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. @@ -21,6 +21,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_delshard.yaml`. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. NOTE: Use tag `isDelete: enable` to delete the shard you want. diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md index 03423e72..3461bf13 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_scale_out_add_shards.md @@ -1,6 +1,6 @@ # Scale Out - Add Shards to an existing Oracle Sharded Database provisioned earlier with System-Managed Sharding with RAFT replication enabled -**NOTE: RAFT Replication Feature is available only for Oracle 23c RDBMS and Oracle 23c GSM version.** +**NOTE: RAFT Replication Feature is available only for Oracle 23ai RDBMS and Oracle 23ai GSM version.** **IMPORTANT:** Make sure you have completed the steps for [Prerequsites for Running Oracle Sharding Database Controller](../../README.md#prerequsites-for-running-oracle-sharding-database-controller) before using Oracle Sharding Controller. 
@@ -19,6 +19,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `snr_ssharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. 
diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml index aabd8470..53b93a0d 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov.yaml @@ -39,10 +39,11 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred gsmImage: container-registry.oracle.com/database/gsm:latest gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" replicationType: "native" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml index def7e73a..0230eac2 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_chunks.yaml @@ -42,10 +42,11 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred gsmImage: container-registry.oracle.com/database/gsm:latest gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" replicationType: "native" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml index 8f17331e..fcc18da0 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone.yaml 
@@ -63,10 +63,11 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred gsmImage: container-registry.oracle.com/database/gsm:latest gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" replicationType: "native" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml index d0c1c6e0..0663f8a5 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_clone_across_ads.yaml @@ -71,10 +71,11 @@ spec: pvMatchLabels: "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred gsmImage: container-registry.oracle.com/database/gsm:latest gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" replicationType: "native" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml index 0859b089..ce194246 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_delshard.yaml @@ -50,10 +50,11 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred gsmImage: 
container-registry.oracle.com/database/gsm:latest gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" replicationType: "native" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml index 123b3ae1..8848b8c7 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_extshard.yaml @@ -49,10 +49,11 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred gsmImage: container-registry.oracle.com/database/gsm:latest gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" replicationType: "native" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml index 0cfccf9a..dce4ba29 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_memory_cpu.yaml @@ -70,10 +70,11 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred gsmImage: container-registry.oracle.com/database/gsm:latest gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" replicationType: "native" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml 
b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml index 345e9c09..2b410e8b 100644 --- a/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml +++ b/docs/sharding/provisioning/snr_system_sharding/snr_ssharding_shard_prov_send_notification.yaml @@ -63,10 +63,11 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/free:latest dbImagePullSecret: ocr-reg-cred gsmImage: container-registry.oracle.com/database/gsm:latest gsmImagePullSecret: ocr-reg-cred + dbEdition: "free" replicationType: "native" isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index e457b7eb..4d24655d 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -38,6 +38,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_clone_across_ads.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. Use the file: [ssharding_shard_prov_clone_across_ads.yaml](./ssharding_shard_prov_clone_across_ads.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index cb01fa0d..5e44a601 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -33,6 +33,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_clone.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. Use the file: [ssharding_shard_prov_clone.yaml](./ssharding_shard_prov_clone.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md index 0c6ea8fe..649fc7c4 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_chunks_specified.md @@ -22,6 +22,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. 
Use the file: [ssharding_shard_prov_chunks.yaml](./ssharding_shard_prov_chunks.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md index c4f45a48..d284bf9b 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_control_on_resources.md @@ -18,6 +18,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_memory_cpu.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. **NOTE:** For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. 
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md index 1a6a1ee3..e77718f4 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_with_notification_using_oci_notification.md @@ -66,6 +66,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_send_notification.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. **NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. 
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md index b223d1af..1ecb0ec1 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_provisioning_without_db_gold_image.md @@ -19,7 +19,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. - + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. 
Use the file: [ssharding_shard_prov.yaml](./ssharding_shard_prov.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md index bca34253..889de98c 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_in_delete_an_existing_shard.md @@ -18,6 +18,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_delshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. NOTE: Use tag `isDelete: enable` to delete the shard you want. 
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md index 1db8e6c3..5086d887 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/system_sharding/ssharding_scale_out_add_shards.md @@ -16,6 +16,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. This use case adds two new shards `shard4`,`shard5` to above Sharding Topology. 
diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml index 5adbd2ce..1bdb9ce5 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov.yaml @@ -39,9 +39,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml index 5c135229..868e8bc1 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_chunks.yaml @@ -42,9 +42,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml index f5816a87..3cafeba7 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone.yaml @@ -63,9 +63,9 @@ spec: 
storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml index 8fee0526..d7ec6365 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_clone_across_ads.yaml @@ -71,9 +71,9 @@ spec: pvMatchLabels: "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml index 3902ceef..1017a9d5 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_delshard.yaml @@ -50,9 +50,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: 
container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml index a11833e0..d23052fb 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_extshard.yaml @@ -49,9 +49,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml index 3f092b89..075919f7 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_memory_cpu.yaml @@ -70,9 +70,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml 
b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml index 0ca6ec6f..aea6fc7c 100644 --- a/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml +++ b/docs/sharding/provisioning/system_sharding/ssharding_shard_prov_send_notification.yaml @@ -63,9 +63,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred isExternalSvc: False isDeleteOraPvc: True diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md index 9b2905e8..e55df2de 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_from_gold_image_across_ads.md @@ -36,6 +36,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_clone_across_ads.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. **NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md index a4669667..edd9c484 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_by_cloning_db_gold_image_in_same_ad.md @@ -32,6 +32,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `ssharding_shard_prov_clone.yaml`. * The `dbImage` used during provisioning the Persistent Volume with Database Gold Image and the `dbImage` used for deploying the Shard or Catalog Database by cloning should be same. 
* To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. **NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md index b52b8745..638b7124 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_control_on_resources.md @@ -19,6 +19,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_memory_cpu.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
+ * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. **NOTE:** For Oracle Database 23ai Free, you can control the `CPU` and `Memory` allocation of the PODs using tags `cpu` and `memory` respectively but tags `INIT_SGA_SIZE` and `INIT_PGA_SIZE` to control the SGA and PGA allocation at the database level are `not` supported. diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md index 640301a2..fe1ca870 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_with_notification_using_oci_notification.md @@ -67,6 +67,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_send_notification.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. 
+ * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. **NOTE:** Provisioning the Sharded Database using Cloning from Database Gold Image is `NOT` supported with Oracle Database 23ai Free. diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md index 2be5ac9f..b0378e04 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_provisioning_without_db_gold_image.md @@ -20,6 +20,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generated the encrypted password file during the deployment. 
Use the file: [udsharding_shard_prov.yaml](./udsharding_shard_prov.yaml) for this use case as below: diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md index 2c4cbfc2..673e455e 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_in_delete_an_existing_shard.md @@ -17,6 +17,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_delshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * In case you want to use the [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then you will need to add the additional parameter `dbEdition: "free"` to the below .yaml file. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. **NOTE:** Use tag `isDelete: enable` to delete the shard you want.
diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md index 20f50b29..abdc53ff 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_scale_out_add_shards.md @@ -17,6 +17,7 @@ In this example, we are using pre-built Oracle Database and Global Data Services * If you plan to use images built by you, you need to change `dbImage` and `gsmImage` tag with the images you have built in your enviornment in file `udsharding_shard_prov_extshard.yaml`. * To understand the Pre-requisite of Database and Global Data Services docker images, refer [Oracle Database and Global Data Services Docker Images](../../README.md#3-oracle-database-and-global-data-services-docker-images) * If the existing Sharding Topology was deployed using [Oracle Database 23ai Free](https://www.oracle.com/database/free/get-started/) Image for Database and GSM, then the additional parameter `dbEdition: "free"` will be needed for the below .yaml file as well. + * Make sure the version of `openssl` in the Oracle Database and Oracle GSM images is compatible with the `openssl` version on the machine where you will run the openssl commands to generate the encrypted password file during the deployment. This use case adds two new shards `shard4`,`shard5` to above Sharding Topology.
diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml index d33be599..c9f20eb3 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov.yaml @@ -39,9 +39,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred shardingType: USER isExternalSvc: False diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml index 04ee5d95..d7e5ce78 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone.yaml @@ -63,9 +63,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred shardingType: USER isExternalSvc: False diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml index 5be6ecde..ae02c7fe 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml +++ 
b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_clone_across_ads.yaml @@ -71,9 +71,9 @@ spec: pvMatchLabels: "failure-domain.beta.kubernetes.io/zone": "PHX-AD-3" storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred shardingType: USER isExternalSvc: False diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml index e00d2272..d83bf546 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_delshard.yaml @@ -50,9 +50,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred shardingType: USER isExternalSvc: False diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml index 3899f2ab..7526feb7 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_extshard.yaml @@ -48,9 +48,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: 
container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred shardingType: USER isExternalSvc: False diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml index 6c65916e..8be81d39 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_memory_cpu.yaml @@ -70,9 +70,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: ocr-reg-cred shardingType: USER isExternalSvc: False diff --git a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml index ef1b5561..4dda6db9 100644 --- a/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml +++ b/docs/sharding/provisioning/user-defined-sharding/udsharding_shard_prov_send_notification.yaml @@ -63,9 +63,9 @@ spec: storageSizeInGb: 50 region: standby storageClass: oci - dbImage: container-registry.oracle.com/database/enterprise:latest + dbImage: container-registry.oracle.com/database/enterprise_ru:19.25.0.0 dbImagePullSecret: ocr-reg-cred - gsmImage: container-registry.oracle.com/database/gsm:latest + gsmImage: container-registry.oracle.com/database/gsm_ru:19.25.0.0 gsmImagePullSecret: 
ocr-reg-cred shardingType: USER isExternalSvc: False From 559238b6a55bbc90bfc34203d6464251153f01a9 Mon Sep 17 00:00:00 2001 From: Saurabh Ahuja Date: Mon, 7 Apr 2025 23:07:10 +0530 Subject: [PATCH 20/24] Upgrade support from v1alpha1 to v4 APIs (#173) * compiled misssed files to support v1alpha1 to v4 upgrade * Update oracle-database-operator.yaml * Update kustomization.yaml --- apis/database/v1alpha1/cdb_webhook.go | 224 +++++++ apis/database/v1alpha1/pdb_webhook.go | 369 +++++++++++ .../v1alpha1/zz_generated.deepcopy.go | 602 +++++++++++++++++ .../crd/bases/database.oracle.com_cdbs.yaml | 238 +++++++ .../database.oracle.com_dataguardbrokers.yaml | 4 +- .../crd/bases/database.oracle.com_pdbs.yaml | 315 ++++++++- ...database.oracle.com_shardingdatabases.yaml | 9 - config/manager/kustomization.yaml | 4 +- config/webhook/manifests.yaml | 65 +- oracle-database-operator.yaml | 621 ++++++++++++++++-- 10 files changed, 2356 insertions(+), 95 deletions(-) create mode 100644 apis/database/v1alpha1/cdb_webhook.go create mode 100644 apis/database/v1alpha1/pdb_webhook.go diff --git a/apis/database/v1alpha1/cdb_webhook.go b/apis/database/v1alpha1/cdb_webhook.go new file mode 100644 index 00000000..e93e216e --- /dev/null +++ b/apis/database/v1alpha1/cdb_webhook.go @@ -0,0 +1,224 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +package v1alpha1 + +import ( + "reflect" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var cdblog = logf.Log.WithName("cdb-webhook") + +func (r *CDB) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-cdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v4,name=mcdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Defaulter = &CDB{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *CDB) Default() { + cdblog.Info("Setting default values in CDB spec for : " + r.Name) + + if r.Spec.ORDSPort == 0 { + r.Spec.ORDSPort = 8888 + } + + if r.Spec.Replicas == 0 { + r.Spec.Replicas = 1 + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
+//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-cdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=cdbs,verbs=create;update,versions=v4,name=vcdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Validator = &CDB{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *CDB) ValidateCreate() (admission.Warnings, error) { + cdblog.Info("ValidateCreate", "name", r.Name) + + var allErrs field.ErrorList + + if r.Spec.ServiceName == "" && r.Spec.DBServer != "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("serviceName"), "Please specify CDB Service name")) + } + + if reflect.ValueOf(r.Spec.CDBTlsKey).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbTlsKey"), "Please specify CDB Tls key(secret)")) + } + + if reflect.ValueOf(r.Spec.CDBTlsCrt).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbTlsCrt"), "Please specify CDB Tls Certificate(secret)")) + } + + if reflect.ValueOf(r.Spec.CDBPriKey).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("CDBPriKey"), "Please specify CDB CDBPriKey(secret)")) + } + + /*if r.Spec.SCANName == "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("scanName"), "Please specify SCAN Name for CDB")) + }*/ + + if (r.Spec.DBServer == "" && r.Spec.DBTnsurl == "") || (r.Spec.DBServer != "" && r.Spec.DBTnsurl != "") { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbServer"), "Please specify Database Server Name/IP Address or tnsalias string")) + } + + if r.Spec.DBTnsurl != "" && (r.Spec.DBServer != "" || r.Spec.DBPort != 0 || r.Spec.ServiceName != "") { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbServer"), "DBtnsurl is orthogonal to (DBServer,DBport,Services)")) + } + + if r.Spec.DBPort == 0 && r.Spec.DBServer != "" 
{ + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbPort"), "Please specify DB Server Port")) + } + if r.Spec.DBPort < 0 && r.Spec.DBServer != "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port")) + } + if r.Spec.ORDSPort < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsPort"), "Please specify a valid ORDS Port")) + } + if r.Spec.Replicas < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas")) + } + if r.Spec.ORDSImage == "" { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsImage"), "Please specify name of ORDS Image to be used")) + } + if reflect.ValueOf(r.Spec.CDBAdminUser).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbAdminUser"), "Please specify user in the root container with sysdba priviledges to manage PDB lifecycle")) + } + if reflect.ValueOf(r.Spec.CDBAdminPwd).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("cdbAdminPwd"), "Please specify password for the CDB Administrator to manage PDB lifecycle")) + } + if reflect.ValueOf(r.Spec.ORDSPwd).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsPwd"), "Please specify password for user ORDS_PUBLIC_USER")) + } + if reflect.ValueOf(r.Spec.WebServerUser).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("webServerUser"), "Please specify the Web Server User having SQL Administrator role")) + } + if reflect.ValueOf(r.Spec.WebServerPwd).IsZero() { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("webServerPwd"), "Please specify password for the Web Server User having SQL Administrator role")) + } + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + 
schema.GroupKind{Group: "database.oracle.com", Kind: "CDB"}, + r.Name, allErrs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *CDB) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + cdblog.Info("validate update", "name", r.Name) + + isCDBMarkedToBeDeleted := r.GetDeletionTimestamp() != nil + if isCDBMarkedToBeDeleted { + return nil, nil + } + + var allErrs field.ErrorList + + // Check for updation errors + oldCDB, ok := old.(*CDB) + if !ok { + return nil, nil + } + + if r.Spec.DBPort < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("dbPort"), "Please specify a valid DB Server Port")) + } + if r.Spec.ORDSPort < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("ordsPort"), "Please specify a valid ORDS Port")) + } + if r.Spec.Replicas < 0 { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("replicas"), "Please specify a valid value for Replicas")) + } + if !strings.EqualFold(oldCDB.Spec.ServiceName, r.Spec.ServiceName) { + allErrs = append(allErrs, + field.Forbidden(field.NewPath("spec").Child("replicas"), "cannot be changed")) + } + + if len(allErrs) == 0 { + return nil, nil + } + + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "CDB"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *CDB) ValidateDelete() (admission.Warnings, error) { + cdblog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/apis/database/v1alpha1/pdb_webhook.go b/apis/database/v1alpha1/pdb_webhook.go new file mode 100644 index 00000000..1f115c9b --- /dev/null +++ b/apis/database/v1alpha1/pdb_webhook.go @@ -0,0 +1,369 @@ +/* +** Copyright (c) 2022 Oracle and/or its affiliates. 
+** +** The Universal Permissive License (UPL), Version 1.0 +** +** Subject to the condition set forth below, permission is hereby granted to any +** person obtaining a copy of this software, associated documentation and/or data +** (collectively the "Software"), free of charge and under any and all copyright +** rights in the Software, and any and all patent rights owned or freely +** licensable by each licensor hereunder covering either (i) the unmodified +** Software as contributed to or provided by such licensor, or (ii) the Larger +** Works (as defined below), to deal in both +** +** (a) the Software, and +** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +** one is included with the Software (each a "Larger Work" to which the Software +** is contributed by such licensors), +** +** without restriction, including without limitation the rights to copy, create +** derivative works of, display, perform, and distribute the Software and make, +** use, sell, offer for sale, import, export, have made, and have sold the +** Software and the Larger Work(s), and to sublicense the foregoing rights on +** either these or other terms. +** +** This license is subject to the following condition: +** The above copyright notice and either this complete permission notice or at +** a minimum a reference to the UPL must be included in all copies or +** substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +** SOFTWARE. 
+ */ + +/* MODIFIED (MM/DD/YY) +** rcitton 07/14/22 - 33822886 + */ + +package v1alpha1 + +import ( + "reflect" + "strconv" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// log is for logging in this package. +var pdblog = logf.Log.WithName("pdb-webhook") + +func (r *PDB) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +//+kubebuilder:webhook:path=/mutate-database-oracle-com-v4-pdb,mutating=true,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v4,name=mpdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Defaulter = &PDB{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *PDB) Default() { + pdblog.Info("Setting default values in PDB spec for : " + r.Name) + + action := strings.ToUpper(r.Spec.Action) + + if action == "DELETE" { + if r.Spec.DropAction == "" { + r.Spec.DropAction = "INCLUDING" + pdblog.Info(" - dropAction : INCLUDING") + } + } else if action != "MODIFY" && action != "STATUS" { + if r.Spec.ReuseTempFile == nil { + r.Spec.ReuseTempFile = new(bool) + *r.Spec.ReuseTempFile = true + pdblog.Info(" - reuseTempFile : " + strconv.FormatBool(*(r.Spec.ReuseTempFile))) + } + if r.Spec.UnlimitedStorage == nil { + r.Spec.UnlimitedStorage = new(bool) + *r.Spec.UnlimitedStorage = true + pdblog.Info(" - unlimitedStorage : " + strconv.FormatBool(*(r.Spec.UnlimitedStorage))) + } + if r.Spec.TDEImport == nil { + r.Spec.TDEImport = new(bool) + *r.Spec.TDEImport = false + pdblog.Info(" - tdeImport : " + strconv.FormatBool(*(r.Spec.TDEImport))) + } + if 
r.Spec.TDEExport == nil { + r.Spec.TDEExport = new(bool) + *r.Spec.TDEExport = false + pdblog.Info(" - tdeExport : " + strconv.FormatBool(*(r.Spec.TDEExport))) + } + if r.Spec.AsClone == nil { + r.Spec.AsClone = new(bool) + *r.Spec.AsClone = false + pdblog.Info(" - asClone : " + strconv.FormatBool(*(r.Spec.AsClone))) + } + + } + + if r.Spec.GetScript == nil { + r.Spec.GetScript = new(bool) + *r.Spec.GetScript = false + pdblog.Info(" - getScript : " + strconv.FormatBool(*(r.Spec.GetScript))) + } +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-database-oracle-com-v4-pdb,mutating=false,failurePolicy=fail,sideEffects=None,groups=database.oracle.com,resources=pdbs,verbs=create;update,versions=v4,name=vpdb.kb.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Validator = &PDB{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *PDB) ValidateCreate() (admission.Warnings, error) { + pdblog.Info("ValidateCreate-Validating PDB spec for : " + r.Name) + + var allErrs field.ErrorList + + r.validateCommon(&allErrs) + + r.validateAction(&allErrs) + + action := strings.ToUpper(r.Spec.Action) + + if len(allErrs) == 0 { + pdblog.Info("PDB Resource : " + r.Name + " successfully validated for Action : " + action) + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "PDB"}, + r.Name, allErrs) +} + +// Validate Action for required parameters +func (r *PDB) validateAction(allErrs *field.ErrorList) { + action := strings.ToUpper(r.Spec.Action) + + pdblog.Info("Valdiating PDB Resource Action : " + action) + + if reflect.ValueOf(r.Spec.PDBTlsKey).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbTlsKey"), "Please specify PDB Tls Key(secret)")) + } + + if reflect.ValueOf(r.Spec.PDBTlsCrt).IsZero() { + *allErrs = append(*allErrs, + 
field.Required(field.NewPath("spec").Child("pdbTlsCrt"), "Please specify PDB Tls Certificate(secret)")) + } + + if reflect.ValueOf(r.Spec.PDBTlsCat).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbTlsCat"), "Please specify PDB Tls Certificate Authority(secret)")) + } + if reflect.ValueOf(r.Spec.PDBPriKey).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbOrdsPrvKey"), "Please specify PDB Tls Certificate Authority(secret)")) + } + + switch action { + case "DELETE": + /* BUG 36752336 - LREST OPERATOR - DELETE NON-EXISTENT PDB SHOWS LRPDB CREATED MESSAGE */ + if r.Status.OpenMode == "READ WRITE" { + pdblog.Info("Cannot delete: pdb is open ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + } + r.CheckObjExistence("DELETE", allErrs, r) + case "CREATE": + if reflect.ValueOf(r.Spec.AdminName).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("adminName"), "Please specify PDB System Administrator user")) + } + if reflect.ValueOf(r.Spec.AdminPwd).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("adminPwd"), "Please specify PDB System Administrator Password")) + } + if reflect.ValueOf(r.Spec.WebServerUsr).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("WebServerUser"), "Please specify the http webServerUser")) + } + if reflect.ValueOf(r.Spec.WebServerPwd).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("webServerPwd"), "Please specify the http webserverPassword")) + } + + if r.Spec.FileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. 
Values can be a filename convert pattern or NONE")) + } + if r.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if r.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + if *(r.Spec.TDEImport) { + r.validateTDEInfo(allErrs) + } + case "CLONE": + // Sample Err: The PDB "pdb1-clone" is invalid: spec.srcPdbName: Required value: Please specify source PDB for Cloning + if r.Spec.SrcPDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("srcPdbName"), "Please specify source PDB name for Cloning")) + } + if r.Spec.TotalSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("totalSize"), "When the storage is not UNLIMITED the Total Size must be specified")) + } + if r.Spec.TempSize == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tempSize"), "When the storage is not UNLIMITED the Temp Size must be specified")) + } + /* We don't need this check as ords open the pdb before cloninig */ + /* + if r.Status.OpenMode == "MOUNTED" { + pdblog.Info("Cannot clone: pdb is mount ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + } + */ + case "PLUG": + if r.Spec.XMLFileName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename")) + } + if r.Spec.FileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("fileNameConversions"), "Please specify a value for fileNameConversions. 
Values can be a filename convert pattern or NONE")) + } + if r.Spec.SourceFileNameConversions == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("sourceFileNameConversions"), "Please specify a value for sourceFileNameConversions. Values can be a filename convert pattern or NONE")) + } + if r.Spec.CopyAction == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("copyAction"), "Please specify a value for copyAction. Values can be COPY, NOCOPY or MOVE")) + } + if *(r.Spec.TDEImport) { + r.validateTDEInfo(allErrs) + } + case "UNPLUG": + if r.Spec.XMLFileName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("xmlFileName"), "Please specify XML metadata filename")) + } + if *(r.Spec.TDEExport) { + r.validateTDEInfo(allErrs) + } + if r.Status.OpenMode == "READ WRITE" { + pdblog.Info("Cannot unplug: pdb is open ") + *allErrs = append(*allErrs, field.Invalid(field.NewPath("status").Child("OpenMode"), "READ WRITE", "pdb "+r.Spec.PDBName+" "+r.Status.OpenMode)) + } + r.CheckObjExistence("UNPLUG", allErrs, r) + case "MODIFY": + if r.Spec.PDBState == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbState"), "Please specify target state of PDB")) + } + if r.Spec.ModifyOption == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("modifyOption"), "Please specify an option for opening/closing a PDB")) + } + r.CheckObjExistence("MODIY", allErrs, r) + } +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *PDB) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + pdblog.Info("ValidateUpdate-Validating PDB spec for : " + r.Name) + + isPDBMarkedToBeDeleted := r.GetDeletionTimestamp() != nil + if isPDBMarkedToBeDeleted { + return nil, nil + } + + var allErrs field.ErrorList + action := strings.ToUpper(r.Spec.Action) + + // If PDB CR has been created and in Ready 
state, only allow updates if the "action" value has changed as well + if (r.Status.Phase == "Ready") && (r.Status.Action != "MODIFY") && (r.Status.Action != "STATUS") && (r.Status.Action == action) { + allErrs = append(allErrs, + field.Required(field.NewPath("spec").Child("action"), "New action also needs to be specified after PDB is in Ready state")) + } else { + + // Check Common Validations + r.validateCommon(&allErrs) + + // Validate required parameters for Action specified + r.validateAction(&allErrs) + + // Check TDE requirements + if (action != "DELETE") && (action != "MODIFY") && (action != "STATUS") && (*(r.Spec.TDEImport) || *(r.Spec.TDEExport)) { + r.validateTDEInfo(&allErrs) + } + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "database.oracle.com", Kind: "PDB"}, + r.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *PDB) ValidateDelete() (admission.Warnings, error) { + pdblog.Info("ValidateDelete-Validating PDB spec for : " + r.Name) + + // TODO(user): fill in your validation logic upon object deletion. 
+ return nil, nil +} + +// Validate common specs needed for all PDB Actions +func (r *PDB) validateCommon(allErrs *field.ErrorList) { + pdblog.Info("validateCommon", "name", r.Name) + + if r.Spec.Action == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("action"), "Please specify PDB operation to be performed")) + } + if r.Spec.CDBResName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("cdbResName"), "Please specify the name of the CDB Kubernetes resource to use for PDB operations")) + } + if r.Spec.PDBName == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("pdbName"), "Please specify name of the PDB to be created")) + } +} + +// Validate TDE information for Create, Plug and Unplug Actions +func (r *PDB) validateTDEInfo(allErrs *field.ErrorList) { + pdblog.Info("validateTDEInfo", "name", r.Name) + + if reflect.ValueOf(r.Spec.TDEPassword).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdePassword"), "Please specify a value for tdePassword.")) + } + if r.Spec.TDEKeystorePath == "" { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeKeystorePath"), "Please specify a value for tdeKeystorePath.")) + } + if reflect.ValueOf(r.Spec.TDESecret).IsZero() { + *allErrs = append(*allErrs, + field.Required(field.NewPath("spec").Child("tdeSecret"), "Please specify a value for tdeSecret.")) + } + +} + +func (r *PDB) CheckObjExistence(action string, allErrs *field.ErrorList, pdb *PDB) { + /* BUG 36752465 - lrest operator - open non-existent pdb creates a lrpdb with status failed */ + pdblog.Info("Action [" + action + "] checkin " + pdb.Spec.PDBName + " existence") + if pdb.Status.OpenMode == "" { + *allErrs = append(*allErrs, field.NotFound(field.NewPath("Spec").Child("PDBName"), " "+pdb.Spec.PDBName+" does not exist : action "+action+" failure")) + + } +} diff --git a/apis/database/v1alpha1/zz_generated.deepcopy.go 
b/apis/database/v1alpha1/zz_generated.deepcopy.go index d0426da8..b20cf834 100644 --- a/apis/database/v1alpha1/zz_generated.deepcopy.go +++ b/apis/database/v1alpha1/zz_generated.deepcopy.go @@ -683,6 +683,239 @@ func (in *Backupconfig) DeepCopy() *Backupconfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDB) DeepCopyInto(out *CDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDB. +func (in *CDB) DeepCopy() *CDB { + if in == nil { + return nil + } + out := new(CDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBAdminPassword) DeepCopyInto(out *CDBAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminPassword. +func (in *CDBAdminPassword) DeepCopy() *CDBAdminPassword { + if in == nil { + return nil + } + out := new(CDBAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBAdminUser) DeepCopyInto(out *CDBAdminUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBAdminUser. 
+func (in *CDBAdminUser) DeepCopy() *CDBAdminUser { + if in == nil { + return nil + } + out := new(CDBAdminUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBList) DeepCopyInto(out *CDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBList. +func (in *CDBList) DeepCopy() *CDBList { + if in == nil { + return nil + } + out := new(CDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBPRIVKEY) DeepCopyInto(out *CDBPRIVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBPRIVKEY. +func (in *CDBPRIVKEY) DeepCopy() *CDBPRIVKEY { + if in == nil { + return nil + } + out := new(CDBPRIVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBPUBKEY) DeepCopyInto(out *CDBPUBKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBPUBKEY. 
+func (in *CDBPUBKEY) DeepCopy() *CDBPUBKEY { + if in == nil { + return nil + } + out := new(CDBPUBKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBSecret) DeepCopyInto(out *CDBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSecret. +func (in *CDBSecret) DeepCopy() *CDBSecret { + if in == nil { + return nil + } + out := new(CDBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBSpec) DeepCopyInto(out *CDBSpec) { + *out = *in + out.SysAdminPwd = in.SysAdminPwd + out.CDBAdminUser = in.CDBAdminUser + out.CDBAdminPwd = in.CDBAdminPwd + out.CDBTlsKey = in.CDBTlsKey + out.CDBTlsCrt = in.CDBTlsCrt + out.ORDSPwd = in.ORDSPwd + out.WebServerUser = in.WebServerUser + out.WebServerPwd = in.WebServerPwd + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.CDBPubKey = in.CDBPubKey + out.CDBPriKey = in.CDBPriKey +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSpec. +func (in *CDBSpec) DeepCopy() *CDBSpec { + if in == nil { + return nil + } + out := new(CDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBStatus) DeepCopyInto(out *CDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBStatus. 
+func (in *CDBStatus) DeepCopy() *CDBStatus { + if in == nil { + return nil + } + out := new(CDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBSysAdminPassword) DeepCopyInto(out *CDBSysAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBSysAdminPassword. +func (in *CDBSysAdminPassword) DeepCopy() *CDBSysAdminPassword { + if in == nil { + return nil + } + out := new(CDBSysAdminPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBTLSCRT) DeepCopyInto(out *CDBTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSCRT. +func (in *CDBTLSCRT) DeepCopy() *CDBTLSCRT { + if in == nil { + return nil + } + out := new(CDBTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDBTLSKEY) DeepCopyInto(out *CDBTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDBTLSKEY. +func (in *CDBTLSKEY) DeepCopy() *CDBTLSKEY { + if in == nil { + return nil + } + out := new(CDBTLSKEY) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CatalogSpec) DeepCopyInto(out *CatalogSpec) { *out = *in @@ -1470,6 +1703,22 @@ func (in *KMSDetailsStatus) DeepCopy() *KMSDetailsStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ORDSPassword) DeepCopyInto(out *ORDSPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ORDSPassword. +func (in *ORDSPassword) DeepCopy() *ORDSPassword { + if in == nil { + return nil + } + out := new(ORDSPassword) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OciAcdSpec) DeepCopyInto(out *OciAcdSpec) { *out = *in @@ -1738,6 +1987,65 @@ func (in *OracleRestDataServiceStatus) DeepCopy() *OracleRestDataServiceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDB) DeepCopyInto(out *PDB) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDB. +func (in *PDB) DeepCopy() *PDB { + if in == nil { + return nil + } + out := new(PDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PDB) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBAdminName) DeepCopyInto(out *PDBAdminName) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminName. +func (in *PDBAdminName) DeepCopy() *PDBAdminName { + if in == nil { + return nil + } + out := new(PDBAdminName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PDBAdminPassword) DeepCopyInto(out *PDBAdminPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBAdminPassword. +func (in *PDBAdminPassword) DeepCopy() *PDBAdminPassword { + if in == nil { + return nil + } + out := new(PDBAdminPassword) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PDBConfig) DeepCopyInto(out *PDBConfig) { *out = *in @@ -1849,6 +2157,204 @@ func (in *PDBDetailsStatus) DeepCopy() *PDBDetailsStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBList) DeepCopyInto(out *PDBList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PDB, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBList. +func (in *PDBList) DeepCopy() *PDBList { + if in == nil { + return nil + } + out := new(PDBList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PDBList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBPRIVKEY) DeepCopyInto(out *PDBPRIVKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBPRIVKEY. 
+func (in *PDBPRIVKEY) DeepCopy() *PDBPRIVKEY { + if in == nil { + return nil + } + out := new(PDBPRIVKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBPUBKEY) DeepCopyInto(out *PDBPUBKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBPUBKEY. +func (in *PDBPUBKEY) DeepCopy() *PDBPUBKEY { + if in == nil { + return nil + } + out := new(PDBPUBKEY) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBSecret) DeepCopyInto(out *PDBSecret) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSecret. +func (in *PDBSecret) DeepCopy() *PDBSecret { + if in == nil { + return nil + } + out := new(PDBSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDBSpec) DeepCopyInto(out *PDBSpec) { + *out = *in + out.PDBTlsKey = in.PDBTlsKey + out.PDBTlsCrt = in.PDBTlsCrt + out.PDBTlsCat = in.PDBTlsCat + out.AdminName = in.AdminName + out.AdminPwd = in.AdminPwd + out.WebServerUsr = in.WebServerUsr + out.WebServerPwd = in.WebServerPwd + if in.ReuseTempFile != nil { + in, out := &in.ReuseTempFile, &out.ReuseTempFile + *out = new(bool) + **out = **in + } + if in.UnlimitedStorage != nil { + in, out := &in.UnlimitedStorage, &out.UnlimitedStorage + *out = new(bool) + **out = **in + } + if in.AsClone != nil { + in, out := &in.AsClone, &out.AsClone + *out = new(bool) + **out = **in + } + if in.TDEImport != nil { + in, out := &in.TDEImport, &out.TDEImport + *out = new(bool) + **out = **in + } + if in.TDEExport != nil { + in, out := &in.TDEExport, &out.TDEExport + *out = new(bool) + **out = **in + } + out.TDEPassword = in.TDEPassword + out.TDESecret = in.TDESecret + if in.GetScript != nil { + in, out := &in.GetScript, &out.GetScript + *out = new(bool) + **out = **in + } + out.PDBPubKey = in.PDBPubKey + out.PDBPriKey = in.PDBPriKey +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBSpec. +func (in *PDBSpec) DeepCopy() *PDBSpec { + if in == nil { + return nil + } + out := new(PDBSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBStatus) DeepCopyInto(out *PDBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBStatus. +func (in *PDBStatus) DeepCopy() *PDBStatus { + if in == nil { + return nil + } + out := new(PDBStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PDBTLSCAT) DeepCopyInto(out *PDBTLSCAT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCAT. +func (in *PDBTLSCAT) DeepCopy() *PDBTLSCAT { + if in == nil { + return nil + } + out := new(PDBTLSCAT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBTLSCRT) DeepCopyInto(out *PDBTLSCRT) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSCRT. +func (in *PDBTLSCRT) DeepCopy() *PDBTLSCRT { + if in == nil { + return nil + } + out := new(PDBTLSCRT) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PDBTLSKEY) DeepCopyInto(out *PDBTLSKEY) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PDBTLSKEY. +func (in *PDBTLSKEY) DeepCopy() *PDBTLSKEY { + if in == nil { + return nil + } + out := new(PDBTLSKEY) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PasswordSpec) DeepCopyInto(out *PasswordSpec) { *out = *in @@ -2414,6 +2920,38 @@ func (in *SourceSpec) DeepCopy() *SourceSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TDEPwd) DeepCopyInto(out *TDEPwd) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDEPwd. 
+func (in *TDEPwd) DeepCopy() *TDEPwd { + if in == nil { + return nil + } + out := new(TDEPwd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TDESecret) DeepCopyInto(out *TDESecret) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TDESecret. +func (in *TDESecret) DeepCopy() *TDESecret { + if in == nil { + return nil + } + out := new(TDESecret) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TargetSpec) DeepCopyInto(out *TargetSpec) { *out = *in @@ -2486,3 +3024,67 @@ func (in *WalletSpec) DeepCopy() *WalletSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerPassword) DeepCopyInto(out *WebServerPassword) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPassword. +func (in *WebServerPassword) DeepCopy() *WebServerPassword { + if in == nil { + return nil + } + out := new(WebServerPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerPasswordPDB) DeepCopyInto(out *WebServerPasswordPDB) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerPasswordPDB. 
+func (in *WebServerPasswordPDB) DeepCopy() *WebServerPasswordPDB { + if in == nil { + return nil + } + out := new(WebServerPasswordPDB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerUser) DeepCopyInto(out *WebServerUser) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUser. +func (in *WebServerUser) DeepCopy() *WebServerUser { + if in == nil { + return nil + } + out := new(WebServerUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebServerUserPDB) DeepCopyInto(out *WebServerUserPDB) { + *out = *in + out.Secret = in.Secret +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebServerUserPDB. +func (in *WebServerUserPDB) DeepCopy() *WebServerUserPDB { + if in == nil { + return nil + } + out := new(WebServerUserPDB) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/database.oracle.com_cdbs.yaml b/config/crd/bases/database.oracle.com_cdbs.yaml index 8ea594e6..924946ee 100644 --- a/config/crd/bases/database.oracle.com_cdbs.yaml +++ b/config/crd/bases/database.oracle.com_cdbs.yaml @@ -14,6 +14,244 @@ spec: singular: cdb scope: Namespaced versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the CDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error 
message, if any + jsonPath: .status.msg + name: Message + type: string + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + ordsImage: + type: string + ordsImagePullPolicy: + enum: + - Always + - Never + type: string + ordsImagePullSecret: + type: string + ordsPort: + type: integer + ordsPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + 
type: object + required: + - secret + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: false + subresources: + status: {} - additionalPrinterColumns: - description: Name of the CDB jsonPath: .spec.cdbName diff --git a/config/crd/bases/database.oracle.com_dataguardbrokers.yaml b/config/crd/bases/database.oracle.com_dataguardbrokers.yaml index 5efceff4..0e27126d 100644 --- a/config/crd/bases/database.oracle.com_dataguardbrokers.yaml +++ b/config/crd/bases/database.oracle.com_dataguardbrokers.yaml @@ -94,7 +94,7 @@ spec: externalConnectString: type: string fastStartFailover: - type: boolean + type: string primaryDatabase: type: string primaryDatabaseRef: @@ -191,7 +191,7 @@ spec: externalConnectString: type: string fastStartFailover: - type: boolean + type: string primaryDatabase: type: string primaryDatabaseRef: diff --git a/config/crd/bases/database.oracle.com_pdbs.yaml b/config/crd/bases/database.oracle.com_pdbs.yaml index b674f856..b2f37ac9 100644 --- a/config/crd/bases/database.oracle.com_pdbs.yaml +++ b/config/crd/bases/database.oracle.com_pdbs.yaml @@ -43,7 +43,7 @@ spec: jsonPath: .status.connString name: Connect_String type: string - name: v4 + name: v1alpha1 schema: 
openAPIV3Schema: properties: @@ -100,17 +100,318 @@ spec: type: boolean assertivePdbDeletion: type: boolean - assertivePdbDeletion: - description: turn on the assertive approach to delete pdb resource - kubectl delete pdb ..... automatically triggers the pluggable database - deletion - type: boolean cdbName: type: string cdbNamespace: type: string + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + pdbName: + type: string + pdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbState: + enum: + - OPEN + - CLOSE + type: string + pdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + 
properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tempSize: + type: string + totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + type: object + status: + properties: + action: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: string + phase: + type: string + status: + type: boolean + totalSize: + type: string + required: + - phase + - status + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the PDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v4 + schema: + 
openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + type: string + adminName: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + asClone: + type: boolean + assertivePdbDeletion: + type: boolean + cdbName: + type: string cdbNamespace: - description: CDB Namespace type: string cdbResName: type: string diff --git a/config/crd/bases/database.oracle.com_shardingdatabases.yaml b/config/crd/bases/database.oracle.com_shardingdatabases.yaml index e46d883e..90c6dd53 100644 --- a/config/crd/bases/database.oracle.com_shardingdatabases.yaml +++ b/config/crd/bases/database.oracle.com_shardingdatabases.yaml @@ -154,9 +154,6 @@ spec: directorName: type: string envVars: - description: Replicas int32 `json:"replicas,omitempty"` // - Gsm Replicas. If you set OraGsmPvcName then it is set default - to 1. 
items: properties: name: @@ -658,12 +655,6 @@ spec: x-kubernetes-int-or-string: true type: object type: object - shardGroup: - type: string - shardRegion: - type: string - shardSpace: - type: string storageSizeInGb: format: int32 type: integer diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 1a9d97d3..7a52fb17 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -8,5 +8,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: lin.ocir.io/intsanjaysingh/mmalvezz/testppr/operatormntnns - newTag: latest + newName: container-registry.oracle.com/database/operator + newTag: 1.2.0 diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 3a0f15ec..b186a5b0 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -168,6 +168,27 @@ webhooks: resources: - autonomousdatabasebackups sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: mcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -232,23 +253,24 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 + - v1beta1 clientConfig: service: name: webhook-service namespace: system - path: /mutate-database-oracle-com-v1alpha1-shardingdatabase + path: /mutate-database-oracle-com-v4-pdb failurePolicy: Fail - name: mshardingdatabasev1alpha1.kb.io + name: mpdb.kb.io rules: - apiGroups: - database.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE resources: - - shardingdatabases + - pdbs sideEffects: None - admissionReviewVersions: - v1 @@ -258,7 +280,7 @@ webhooks: namespace: system path: 
/mutate-database-oracle-com-v1alpha1-shardingdatabase failurePolicy: Fail - name: mshardingdatabase.kb.io + name: mshardingdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com @@ -602,6 +624,27 @@ webhooks: resources: - autonomousdatabases sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-database-oracle-com-v4-cdb + failurePolicy: Fail + name: vcdb.kb.io + rules: + - apiGroups: + - database.oracle.com + apiVersions: + - v4 + operations: + - CREATE + - UPDATE + resources: + - cdbs + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -646,24 +689,24 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 + - v1beta1 clientConfig: service: name: webhook-service namespace: system - path: /validate-database-oracle-com-v1alpha1-shardingdatabase + path: /validate-database-oracle-com-v4-pdb failurePolicy: Fail - name: vshardingdatabasev1alpha1.kb.io + name: vpdb.kb.io rules: - apiGroups: - database.oracle.com apiVersions: - - v1alpha1 + - v4 operations: - CREATE - UPDATE - - DELETE resources: - - shardingdatabases + - pdbs sideEffects: None - admissionReviewVersions: - v1 @@ -673,7 +716,7 @@ webhooks: namespace: system path: /validate-database-oracle-com-v1alpha1-shardingdatabase failurePolicy: Fail - name: vshardingdatabase.kb.io + name: vshardingdatabasev1alpha1.kb.io rules: - apiGroups: - database.oracle.com diff --git a/oracle-database-operator.yaml b/oracle-database-operator.yaml index 70147329..1179b272 100644 --- a/oracle-database-operator.yaml +++ b/oracle-database-operator.yaml @@ -1309,6 +1309,244 @@ spec: singular: cdb scope: Namespaced versions: + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: ' Name of the DB Server' + jsonPath: .spec.dbServer + name: DB Server + type: string + - description: DB server port + jsonPath: .spec.dbPort + name: 
DB Port + type: integer + - description: Replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: Status of the CDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: ' string of the tnsalias' + jsonPath: .spec.dbTnsurl + name: TNS STRING + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + cdbAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbAdminUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbName: + type: string + cdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + cdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + dbPort: + type: integer + dbServer: + type: string + dbTnsurl: + type: string + deletePdbCascade: + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + ordsImage: + type: string + ordsImagePullPolicy: + enum: + - 
Always + - Never + type: string + ordsImagePullSecret: + type: string + ordsPort: + type: integer + ordsPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + replicas: + type: integer + serviceName: + type: string + sysAdminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + type: object + status: + properties: + msg: + type: string + phase: + type: string + status: + type: boolean + required: + - phase + - status + type: object + type: object + served: true + storage: false + subresources: + status: {} - additionalPrinterColumns: - description: Name of the CDB jsonPath: .spec.cdbName @@ -8631,7 +8869,7 @@ spec: externalConnectString: type: string fastStartFailover: - type: boolean + type: string primaryDatabase: type: string primaryDatabaseRef: @@ -8728,7 +8966,7 @@ spec: externalConnectString: type: string fastStartFailover: - type: boolean + type: string primaryDatabase: type: string primaryDatabaseRef: @@ -11023,7 +11261,7 @@ spec: jsonPath: .status.connString name: Connect_String type: string - name: v4 + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -11080,16 +11318,10 @@ spec: type: boolean assertivePdbDeletion: type: boolean - assertivePdbDeletion: - description: turn on the assertive approach to delete pdb resource kubectl delete pdb ..... 
automatically triggers the pluggable database deletion - type: boolean cdbName: type: string cdbNamespace: type: string - cdbNamespace: - description: CDB Namespace - type: string cdbResName: type: string copyAction: @@ -11306,15 +11538,324 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert + - additionalPrinterColumns: + - description: Name of the CDB + jsonPath: .spec.cdbName + name: CDB Name + type: string + - description: Name of the PDB + jsonPath: .spec.pdbName + name: PDB Name + type: string + - description: PDB Open Mode + jsonPath: .status.openMode + name: PDB State + type: string + - description: Total Size of the PDB + jsonPath: .status.totalSize + name: PDB Size + type: string + - description: Status of the PDB Resource + jsonPath: .status.phase + name: Status + type: string + - description: Error message, if any + jsonPath: .status.msg + name: Message + type: string + - description: The connect string to be used + jsonPath: .status.connString + name: Connect_String + type: string + name: v4 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + action: + enum: + - Create + - Clone + - Plug + - Unplug + - Delete + - Modify + - Status + - Map + type: string + adminName: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + adminPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + asClone: + type: boolean + assertivePdbDeletion: + type: boolean + cdbName: + 
type: string + cdbNamespace: + type: string + cdbResName: + type: string + copyAction: + enum: + - COPY + - NOCOPY + - MOVE + type: string + dropAction: + enum: + - INCLUDING + - KEEP + type: string + fileNameConversions: + type: string + getScript: + type: boolean + modifyOption: + enum: + - IMMEDIATE + - NORMAL + - READ ONLY + - READ WRITE + - RESTRICTED + type: string + pdbName: + type: string + pdbOrdsPrvKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbOrdsPubKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbState: + enum: + - OPEN + - CLOSE + type: string + pdbTlsCat: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsCrt: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + pdbTlsKey: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + reuseTempFile: + type: boolean + sourceFileNameConversions: + type: string + sparseClonePath: + type: string + srcPdbName: + type: string + tdeExport: + type: boolean + tdeImport: + type: boolean + tdeKeystorePath: + type: string + tdePassword: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + tdeSecret: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: 
+ - secret + type: object + tempSize: + type: string + totalSize: + type: string + unlimitedStorage: + type: boolean + webServerPwd: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + webServerUser: + properties: + secret: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - secret + type: object + xmlFileName: + type: string + required: + - action + type: object + status: + properties: + action: + type: string + connString: + type: string + modifyOption: + type: string + msg: + type: string + openMode: + type: string + phase: + type: string + status: + type: boolean + totalSize: + type: string + required: + - phase + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: oracle-database-operator-system/oracle-database-operator-serving-cert controller-gen.kubebuilder.io/version: v0.16.5 name: shardingdatabases.database.oracle.com spec: @@ -11466,7 +12007,6 @@ spec: directorName: type: string envVars: - description: Replicas int32 `json:"replicas,omitempty"` // Gsm Replicas. If you set OraGsmPvcName then it is set default to 1. 
items: properties: name: @@ -11968,12 +12508,6 @@ spec: x-kubernetes-int-or-string: true type: object type: object - shardGroup: - type: string - shardRegion: - type: string - shardSpace: - type: string storageSizeInGb: format: int32 type: integer @@ -13788,26 +14322,6 @@ webhooks: resources: - shardingdatabases sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: oracle-database-operator-webhook-service - namespace: oracle-database-operator-system - path: /mutate-database-oracle-com-v1alpha1-shardingdatabase - failurePolicy: Fail - name: mshardingdatabase.kb.io - rules: - - apiGroups: - - database.oracle.com - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - shardingdatabases - sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -14165,27 +14679,6 @@ webhooks: resources: - shardingdatabases sideEffects: None -- admissionReviewVersions: - - v1 - clientConfig: - service: - name: oracle-database-operator-webhook-service - namespace: oracle-database-operator-system - path: /validate-database-oracle-com-v1alpha1-shardingdatabase - failurePolicy: Fail - name: vshardingdatabase.kb.io - rules: - - apiGroups: - - database.oracle.com - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - - DELETE - resources: - - shardingdatabases - sideEffects: None - admissionReviewVersions: - v1 - v1beta1 From 5a1d9d802f9fa841cbd3eaac62a14fb5de7b39be Mon Sep 17 00:00:00 2001 From: marcstef Date: Fri, 9 May 2025 12:16:40 +0000 Subject: [PATCH 21/24] example SIDB --- docs/ordsservices/examples/sidb_container.md | 50 ++++++++++---------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/docs/ordsservices/examples/sidb_container.md b/docs/ordsservices/examples/sidb_container.md index 804ecca4..3cda09ea 100644 --- a/docs/ordsservices/examples/sidb_container.md +++ b/docs/ordsservices/examples/sidb_container.md @@ -2,57 +2,50 @@ This example walks through using the **ORDSSRVS Controller** with a 
Containerised Oracle Database created by the **SIDB Controller** in the same Kubernetes Cluster. -### Cert-Manager and Oracle Database Operator installation - -Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instractions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. - +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) ### Deploy a Containerised Oracle Database +Refer to Single Instance Database (SIDB) [README](https://github.com/oracle/oracle-database-operator/blob/main/docs/sidb/README.md) for details. + 1. Create a Secret for the Database password: ```bash - DB_PWD=$(echo "ORDSpoc_$(date +%H%S%M)") - - kubectl create secret generic sidb-db-auth \ - --from-literal=password=${DB_PWD} + DB_PWD= + kubectl create secret generic sidb-db-auth --from-literal=password=${DB_PWD} --namespace ordsnamespace ``` 1. Create a manifest for the containerised Oracle Database. The POC uses an Oracle Free Image, but other versions may be subsituted; review the OraOperator Documentation for details on the manifests. 
- ```bash - echo " - apiVersion: database.oracle.com/v1alpha1 + ```yaml + apiVersion: database.oracle.com/v4 kind: SingleInstanceDatabase metadata: name: oraoper-sidb + namespace: ordsnamespace spec: - replicas: 1 - image: - pullFrom: container-registry.oracle.com/database/free:23.4.0.0 - prebuiltDB: true - sid: FREE edition: free adminPassword: secretName: sidb-db-auth - secretKey: password - pdbName: FREEPDB1" | kubectl apply -f - + image: + pullFrom: container-registry.oracle.com/database/free:23.7.0.0 + prebuiltDB: true + replicas: 1 ``` - latest container-registry.oracle.com/database/free version, **23.4.0.0**, valid as of **2-May-2024** + latest container-registry.oracle.com/database/free version, **23.7.0.0-lite**, valid as of **2-May-2025** + 1. Watch the `singleinstancedatabases` resource until the database status is **Healthy**: ```bash - kubectl get singleinstancedatabases/oraoper-sidb -w + kubectl get singleinstancedatabases/oraoper-sidb -w -n ordsnamespace ``` - **NOTE**: If this is the first time pulling the free database image, it may take up to 15 minutes for the database to become available. 
### Create encryped secret ```bash - openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key openssl rsa -in ca.key -outform PEM -pubout -out public.pem kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace @@ -61,8 +54,6 @@ echo "${DB_PWD}" > sidb-db-auth openssl rsautl -encrypt -pubin -inkey public.pem -in sidb-db-auth |base64 > e_sidb-db-auth kubectl create secret generic sidb-db-auth-enc --from-file=password=e_sidb-db-auth -n ordsnamespace rm sidb-db-auth e_sidb-db-auth - - ``` @@ -72,6 +63,7 @@ rm sidb-db-auth e_sidb-db-auth ```bash CONN_STRING=$(kubectl get singleinstancedatabase oraoper-sidb \ + -n ordsnamespace \ -o jsonpath='{.status.pdbConnectString}') echo $CONN_STRING @@ -89,7 +81,7 @@ rm sidb-db-auth e_sidb-db-auth ```bash echo " - apiVersion: database.oracle.com/v1 + apiVersion: database.oracle.com/v4 kind: OrdsSrvs metadata: name: ords-sidb @@ -97,6 +89,9 @@ rm sidb-db-auth e_sidb-db-auth spec: image: container-registry.oracle.com/database/ords:24.1.1 forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey globalSettings: database.api.enabled: true poolSettings: @@ -112,7 +107,10 @@ rm sidb-db-auth e_sidb-db-auth secretName: sidb-db-auth-enc db.adminUser: SYS db.adminUser.secret: - secretName: sidb-db-auth-enc" | kubectl apply -f - + secretName: sidb-db-auth-enc + " > ords-sidb.yaml + + kubectl apply -f ords-sidb.yaml ``` latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** From 9f3e5dfdf0c1578158d7c99575fc8faec0ba4c28 Mon Sep 17 00:00:00 2001 From: marcstef Date: Fri, 9 May 2025 12:30:13 +0000 Subject: [PATCH 22/24] ORDSSRVS examples --- docs/ordsservices/README.md | 43 ++++++- docs/ordsservices/examples/adb.md | 40 +++---- docs/ordsservices/examples/adb_oraoper.md | 86 ++++++-------- docs/ordsservices/examples/existing_db.md | 112 ++++++++++++++++++ docs/ordsservices/examples/mongo_api.md | 14 +-- 
docs/ordsservices/examples/multi_pool.md | 27 ++--- .../examples/ordsnamespace-role-binding.yaml | 13 ++ 7 files changed, 234 insertions(+), 101 deletions(-) create mode 100644 docs/ordsservices/examples/existing_db.md create mode 100644 docs/ordsservices/examples/ordsnamespace-role-binding.yaml diff --git a/docs/ordsservices/README.md b/docs/ordsservices/README.md index 1740e99f..57195120 100644 --- a/docs/ordsservices/README.md +++ b/docs/ordsservices/README.md @@ -25,22 +25,53 @@ It supports the majority of ORDS configuration settings as per the [API Document The ORDS and APEX schemas can be [automatically installed/upgraded](./autoupgrade.md) into the Oracle Database by the ORDS controller. ORDS Version support: -* v22.1+ +* 24.1.1 +(Newer versions of ORDS will be supported in the next update of OraOperator) Oracle Database Version: * 19c * 23ai (incl. 23ai Free) +### Prerequisites -### Common Configurations +1. Oracle Database Operator + + Install the Oracle Database Operator (OraOperator) using the instructions in the [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. + +1. Namespace + + For a dedicated namespace deployment of the ORDSSRVS controller, refer to the "Namespace Scoped Deployment" section in the OraOperator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md#2-namespace-scoped-deployment). + + The following examples deploy the controller to the 'ordsnamespace' namespace. 
+ + Create the namespace: + ```bash + kubectl create namespace ordsnamespace + ``` + + Apply namespace role binding [ordsnamespace-role-binding.yaml](./ordsnamespace-role-binding.yaml): + ```bash + kubectl apply -f ordsnamespace-role-binding.yaml + ``` + + Edit OraOperator to add the namespace under WATCH_NAMESPACE: + ```yaml + - name: WATCH_NAMESPACE + value: "default,,ordsnamespace" + ``` + +### Common configuration examples A few common configuration examples can be used to quickly familiarise yourself with the ORDS Custom Resource Definition. The "Conclusion" section of each example highlights specific settings to enable functionality that maybe of interest. -* [Containerised Single Instance Database using the Oracontroller](./examples/sidb_container.md) -* [Multipool, Multidatabase using a TNS Names file](./examples/multi_pool.md) -* [Autonomous Database using the Oracontroller](./examples/adb_oraoper.md) - (Customer Managed ORDS) *See [Limitations](#limitations) -* [Autonomous Database without the Oracontroller](./examples/adb.md) - (Customer Managed ORDS) +Before + +* [Pre-existing Database](./examples/existing_db.md) +* [Containerised Single Instance Database (SIDB)](./examples/sidb_container.md) +* [Multidatabase using a TNS Names file](./examples/multi_pool.md) +* [Autonomous Database using the OraOperator](./examples/adb_oraoper.md) *See [Limitations](#limitations) +* [Autonomous Database without the OraOperator](./examples/adb.md) * [Oracle API for MongoDB Support](./examples/mongo_api.md) Running through all examples in the same Kubernetes cluster illustrates the ability to run multiple ORDS instances with a variety of different configurations. 
diff --git a/docs/ordsservices/examples/adb.md b/docs/ordsservices/examples/adb.md index ba53aac5..90a21b5c 100644 --- a/docs/ordsservices/examples/adb.md +++ b/docs/ordsservices/examples/adb.md @@ -5,11 +5,7 @@ This example walks through using the **ORDSSRVS controller** with an Oracle Auto This assumes that an ADB has already been provisioned and is configured as "Secure Access from Anywhere". Note that if behind a Proxy, this example will not work as the Wallet will need to be modified to support the proxy configuration. - -### Cert-Manager and Oracle Database Operator installation - -Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instractions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. - +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) ### ADB Wallet Secret @@ -25,13 +21,13 @@ kubectl create secret generic adb-wallet \ Create a Secret for the ADB ADMIN password, replacing with the real password: ```bash -echo adb-db-auth-enc -openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.k +echo ${ADMIN_PASSWORD} > adb-db-auth-enc +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key openssl rsa -in ca.key -outform PEM -pubout -out public.pem kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace -openssl rsautl -encrypt -pubin -inkey public.pem -in adb-db-auth-enc |base64 > e_sidb-db-auth-enc -kubectl create secret generic adb-db-auth-enc --from-file=password=e_sidb-db-auth-enc -n ordsnamespace -rm adb-db-auth-enc e_sidb-db-auth-enc +openssl rsautl -encrypt -pubin -inkey public.pem -in adb-db-auth-enc |base64 > e_adb-db-auth-enc +kubectl create secret generic 
adb-oraoper-db-auth-enc --from-file=password=e_adb-db-auth-enc -n ordsnamespace +rm adb-db-auth-enc e_adb-db-auth-enc ``` ### Create RestDataServices Resource @@ -43,22 +39,24 @@ rm adb-db-auth-enc e_sidb-db-auth-enc Replace with the ADB Name and ensure that the `db.wallet.zip.service` is valid for your ADB Workload (e.g. _TP or _HIGH, etc.): - ```bash - echo " - apiVersion: database.oracle.com/v1 - kind: OrdsSrvs + ```yaml + apiVersion: database.oracle.com/v4 + kind: OrdsSrvs metadata: name: ords-adb namespace: ordsnamespace spec: image: container-registry.oracle.com/database/ords:24.1.1 - globalSettings: - database.api.enabled: true + forceRestart: true encPrivKey: secretName: prvkey passwordKey: privateKey + globalSettings: + database.api.enabled: true poolSettings: - poolName: adb + restEnabledSql.active: true + plsql.gateway.mode: direct db.wallet.zip.service: _TP dbWalletSecret: secretName: adb-wallet @@ -68,18 +66,16 @@ rm adb-db-auth-enc e_sidb-db-auth-enc plsql.gateway.mode: proxied db.username: ORDS_PUBLIC_USER_OPER db.secret: - secretName: adb-db-auth-enc - passwordKey: password + secretName: adb-oraoper-db-auth-enc db.adminUser: ADMIN db.adminUser.secret: - secretName: adb-db-auth-enc - passwordKey: password" | kubectl apply -f - + secretName: adb-oraoper-db-auth-enc ``` latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** 1. Watch the restdataservices resource until the status is **Healthy**: ```bash - kubectl get ordssrvs ords-adb -w + kubectl get -n ordsnamespace ordssrvs ords-adb -w ``` **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. 
If APEX @@ -91,7 +87,7 @@ rm adb-db-auth-enc e_sidb-db-auth-enc Open a port-forward to the ORDS service, for example: ```bash -kubectl port-forward service/ords-adb 8443:8443 +kubectl port-forward service/ords-adb -n ordsnamespace 8443:8443 ``` Direct your browser to: `https://localhost:8443/ords/adb` diff --git a/docs/ordsservices/examples/adb_oraoper.md b/docs/ordsservices/examples/adb_oraoper.md index b0872fb3..253365c5 100644 --- a/docs/ordsservices/examples/adb_oraoper.md +++ b/docs/ordsservices/examples/adb_oraoper.md @@ -4,23 +4,15 @@ This example walks through using the **ORDS Controller** with a Containerised Or When connecting to a mTLS enabled ADB while using the OraOperator to retreive the Wallet as is done in the example, it is currently not supported to have multiple, different databases supported by the single Ordssrvs resource. This is due to a requirement to set the `TNS_ADMIN` parameter at the Pod level ([#97](https://github.com/oracle/oracle-database-operator/issues/97)). -### Cert-Manager and Oracle Database Operator installation - -Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instractions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) ### Setup Oracle Cloud Authorisation -In order for the OraOperator to access the ADB, some pre-requisites are required, as detailed [here](https://github.com/oracle/oracle-database-operator/blob/main/docs/adb/ADB_PREREQUISITES.md). Either establish Instance Principles or create the required ConfigMap/Secret. 
This example uses the later: +In order for the OraOperator to access the ADB, some additional pre-requisites are required, as detailed [here](https://github.com/oracle/oracle-database-operator/blob/main/docs/adb/ADB_PREREQUISITES.md). +Either establish Instance Principles or create the required ConfigMap/Secret. This example uses the later, using the helper script [set_ocicredentials.sh](https://github.com/oracle/oracle-database-operator/blob/main/set_ocicredentials.sh) : ```bash -kubectl create configmap oci-cred \ ---from-literal=tenancy= \ ---from-literal=user= \ ---from-literal=fingerprint= \ ---from-literal=region= - -kubectl create secret generic oci-privatekey \ ---from-file=privatekey= +./set_ocicredentials.sh run -n ordsnamespace ``` ### ADB ADMIN Password Secret @@ -31,6 +23,7 @@ Create a Secret for the ADB Admin password: DB_PWD=$(echo "ORDSpoc_$(date +%H%S%M)") kubectl create secret generic adb-oraoper-db-auth \ + -n ordsnamespace \ --from-literal=adb-oraoper-db-auth=${DB_PWD} ``` @@ -40,51 +33,49 @@ kubectl create secret generic adb-oraoper-db-auth \ 1. Obtain the OCID of the ADB and set to an environment variable: - ``` - export ADB_OCID= - ``` + ```bash + export ADB_OCID= + ``` -1. Create a manifest to bind to the ADB. +1. Create and apply a manifest to bind to the ADB. + "adb-oraoper-tns-admin" secret will be created by the controller. - ```bash - echo " - apiVersion: database.oracle.com/v1alpha1 + ```yaml + apiVersion: database.oracle.com/v4 kind: AutonomousDatabase metadata: name: adb-oraoper + namespace: ordsnamespace spec: - hardLink: false - ociConfig: - configMapName: oci-cred - secretName: oci-privatekey - details: - autonomousDatabaseOCID: $ADB_OCID - wallet: + action: Sync + wallet: name: adb-oraoper-tns-admin password: k8sSecret: - name: adb-oraoper-db-auth" | kubectl apply -f - + name: adb-oraoper-db-auth + details: + id: $ADB_OCID ``` 1. 
Update the ADMIN Password: -```bash - kubectl patch adb adb-oraoper --type=merge \ - -p '{"spec":{"details":{"adminPassword":{"k8sSecret":{"name":"adb-oraoper-db-auth"}}}}}' -``` + ```bash + kubectl patch adb adb-oraoper --type=merge \ + -n ordsnamespace \ + -p '{"spec":{"details":{"adminPassword":{"k8sSecret":{"name":"adb-oraoper-db-auth"}}}}}' + ``` 1. Watch the `adb` resource until the STATE is **AVAILABLE**: ```bash - kubectl get adb/adb-oraoper -w + kubectl get -n ordsnamespace adb/adb-oraoper -w ``` ### Create encrypted password - ```bash -echo ${DB_PWD} adb-db-auth-enc -openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.k +echo ${DB_PWD} > adb-db-auth-enc +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key openssl rsa -in ca.key -outform PEM -pubout -out public.pem kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace openssl rsautl -encrypt -pubin -inkey public.pem -in adb-db-auth-enc |base64 > e_adb-db-auth-enc @@ -92,24 +83,21 @@ kubectl create secret generic adb-oraoper-db-auth-enc --from-file=password=e_ad rm adb-db-auth-enc e_adb-db-auth-enc ``` - - ### Create OrdsSrvs Resource 1. Obtain the Service Name from the OraOperator - ```bash - SERVICE_NAME=$(kubectl get adb adb-oraoper -o=jsonpath='{.spec.details.dbName}'_TP) - ``` + ```bash + SERVICE_NAME=$(kubectl get -n ordsnamespace adb adb-oraoper -o=jsonpath='{.spec.details.dbName}'_TP) + ``` 1. Create a manifest for ORDS. As an ADB already maintains ORDS and APEX, `autoUpgradeORDS` and `autoUpgradeAPEX` will be ignored if set. A new DB User for ORDS will be created to avoid conflict with the pre-provisioned one. This user will be named, `ORDS_PUBLIC_USER_OPER` if `db.username` is either not specified or set to `ORDS_PUBLIC_USER`. 
- ```bash - echo " - apiVersion: database.oracle.com/v1 + ```yaml + apiVersion: database.oracle.com/v4 kind: OrdsSrvs metadata: name: ords-adb-oraoper @@ -117,10 +105,10 @@ rm adb-db-auth-enc e_adb-db-auth-enc spec: image: container-registry.oracle.com/database/ords:24.1.1 forceRestart: true - encPrivKey: - secretName: prvkey - passwordKey: privateKey - globalSettings: + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: database.api.enabled: true poolSettings: - poolName: adb-oraoper @@ -134,11 +122,9 @@ rm adb-db-auth-enc e_adb-db-auth-enc db.username: ORDS_PUBLIC_USER_OPER db.secret: secretName: adb-oraoper-db-auth-enc - passwordKey: adb-oraoper-db-auth-enc db.adminUser: ADMIN db.adminUser.secret: secretName: adb-oraoper-db-auth-enc - passwordKey: adb-oraoper-db-auth-enc" | kubectl apply -f - ``` latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** @@ -157,7 +143,7 @@ rm adb-db-auth-enc e_adb-db-auth-enc Open a port-forward to the ORDS service, for example: ```bash -kubectl port-forward service/ords-adb-oraoper 8443:8443 +kubectl port-forward service/ords-adb-oraoper -n ordsnamespace 8443:8443 ``` Direct your browser to: `https://localhost:8443/ords/adb-oraoper` diff --git a/docs/ordsservices/examples/existing_db.md b/docs/ordsservices/examples/existing_db.md new file mode 100644 index 00000000..6d4791ae --- /dev/null +++ b/docs/ordsservices/examples/existing_db.md @@ -0,0 +1,112 @@ +# Example: Pre-existing Database + +This example walks through configuring the ORDS Controller to use either a database deployed within Kubernetes, or an existing database external to your cluster. + +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) + +### Database Access + +This example assumes you have a running, accessible Oracle Database. 
+ +```bash +export CONN_STRING=:/ +``` + +### Create encrypted secrets + +```bash +DB_PWD= + +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key +openssl rsa -in ca.key -outform PEM -pubout -out public.pem +kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace + +echo "${DB_PWD}" > db-auth +openssl rsautl -encrypt -pubin -inkey public.pem -in db-auth |base64 > e_db-auth-enc +kubectl create secret generic db-auth-enc --from-file=password=e_db-auth-enc -n ordsnamespace + +rm db-auth e_db-auth-enc + +``` + +### Create ordssrvs Resource + +1. Create a manifest for ORDS. + + This example assumes APEX is already installed in the database. + + The following additional keys are specified for the pool: + * `autoUpgradeORDS` - Boolean; when true the ORDS will be installed/upgraded in the database + * `db.adminUser` - User with privileges to install, upgrade or uninstall ORDS in the database (SYS). + * `db.adminUser.secret` - Secret containing the password for `db.adminUser` (created in the first step) + * `db.username` will be used as the ORDS schema in the database during the install/upgrade process (ORDS_PUBLIC_USER). 
+ + ```bash + echo " + apiVersion: database.oracle.com/v4 + kind: OrdsSrvs + metadata: + name: ords-db + namespace: ordsnamespace + spec: + image: container-registry.oracle.com/database/ords:24.1.1 + forceRestart: true + encPrivKey: + secretName: prvkey + passwordKey: privateKey + globalSettings: + database.api.enabled: true + poolSettings: + - poolName: default + autoUpgradeORDS: true + restEnabledSql.active: true + plsql.gateway.mode: direct + db.connectionType: customurl + db.customURL: jdbc:oracle:thin:@//${CONN_STRING} + db.username: ORDS_PUBLIC_USER + db.secret: + secretName: db-auth-enc + db.adminUser: SYS + db.adminUser.secret: + secretName: db-auth-enc + " > ords-db.yaml + + kubectl apply -f ords-db.yaml + ``` + + latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** + +1. Watch the restdataservices resource until the status is **Healthy**: + ```bash + kubectl get ordssrvs ords-sidb -w + ``` + + **NOTE**: If this is the first time pulling the ORDS image, it may take up to 5 minutes. + + You can watch the APEX/ORDS Installation progress by running: + + ```bash + POD_NAME=$(kubectl get pod -l "app.kubernetes.io/instance=ords-sidb" -o custom-columns=NAME:.metadata.name -n ordsnamespace --no-headers) + + kubectl logs ${POD_NAME} -c ords-sidb-init -n ordsnamespace -f + ``` + +### Test + +Open a port-forward to the ORDS service, for example: + +```bash +kubectl port-forward service/ords-db -n ordsnamespace 8443:8443 +``` + +Direct your browser to: `https://localhost:8443/ords` + + +## Conclusion + +This example has a single database pool, named `default`. 
It is set to: + +* Automatically restart when the configuration changes: `forceRestart: true` +* Automatically install/update ORDS on startup, if required: `autoUpgradeORDS: true` +* Use a basic connection string to connect to the database: `db.customURL: jdbc:oracle:thin:@//${CONN_STRING}` +* The `passwordKey` has been ommitted from both `db.secret` and `db.adminUser.secret` as the password was stored in the default key (`password`) diff --git a/docs/ordsservices/examples/mongo_api.md b/docs/ordsservices/examples/mongo_api.md index 70391fbd..f0fd0cf5 100644 --- a/docs/ordsservices/examples/mongo_api.md +++ b/docs/ordsservices/examples/mongo_api.md @@ -2,11 +2,7 @@ This example walks through using the **ORDSSRVS Controller** with a Containerised Oracle Database to enable MongoDB API Support. - -### Cert-Manager and Oracle Database Operator installation - -Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instractions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. - +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) ### Database Access @@ -39,13 +35,15 @@ In the database, create an ORDS-enabled user. 
As this example uses the [Contain ### Create encrypted secrets ```bash -openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.k + +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -pkeyopt rsa_keygen_pubexp:65537 > ca.key openssl rsa -in ca.key -outform PEM -pubout -out public.pem kubectl create secret generic prvkey --from-file=privateKey=ca.key -n ordsnamespace + +echo "${DB_PWD}" > sidb-db-auth-enc openssl rsautl -encrypt -pubin -inkey public.pem -in sidb-db-auth-enc |base64 > e_sidb-db-auth-enc kubectl create secret generic sidb-db-auth-enc --from-file=password=e_sidb-db-auth-enc -n ordsnamespace rm sidb-db-auth-enc e_sidb-db-auth-enc - ``` ### Create ordssrvs Resource @@ -71,7 +69,7 @@ rm sidb-db-auth-enc e_sidb-db-auth-enc ```bash echo " apiVersion: database.oracle.com/v4 - kind: ordssrvs + kind: OrdsSrvs metadata: name: ords-sidb namespace: ordsnamespace diff --git a/docs/ordsservices/examples/multi_pool.md b/docs/ordsservices/examples/multi_pool.md index 21c5f24d..ffb537bf 100644 --- a/docs/ordsservices/examples/multi_pool.md +++ b/docs/ordsservices/examples/multi_pool.md @@ -4,9 +4,8 @@ This example walks through using the **ORDSSRVS Operator** with multiple databas Keep in mind that all pools are running in the same Pod, therefore, changing the configuration of one pool will require a recycle of all pools. -### Cert-Manager and Oracle Database Operator installation +Before testing this example, please verify the prerequisites : [ORDSSRVS prerequisites](../README.md#prerequisites) -Install the [Cert Manager](https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml) and the [Oracle Database Operator](https://github.com/oracle/oracle-database-operator) using the instractions in the Operator [README](https://github.com/oracle/oracle-database-operator/blob/main/README.md) file. 
### TNS_ADMIN Secret @@ -86,25 +85,18 @@ If taking advantage of the [AutoUpgrade](../autoupgrade.md) functionality, creat In this example, only PDB1 will be set for [AutoUpgrade](../autoupgrade.md), the other PDBs already have APEX and ORDS installed. ```bash - - - echo "THIS_IS_A_PASSWORD" > syspwdfile -openssl rsautl -encrypt -pubin -inkey public.pem -in ordspwdfile |base64 > e_syspwdfile +openssl rsautl -encrypt -pubin -inkey public.pem -in syspwdfile |base64 > e_syspwdfile kubectl create secret generic pdb1-priv-auth-enc --from-file=password=e_syspwdfile -n ordsnamespace rm syspwdfile e_syspwdfile - -kubectl create secret generic pdb1-priv-auth \ - --from-literal=password=pdb1-battery-staple ``` ### Create OrdsSrvs Resource -1. Create a manifest for ORDS. +1. Create a manifest for ORDS, ords-multi-pool.yaml: - ```bash - echo " - apiVersion: database.oracle.com/v1 + ```yaml + apiVersion: database.oracle.com/v4 kind: OrdsSrvs metadata: name: ords-multi-pool @@ -166,10 +158,15 @@ kubectl create secret generic pdb1-priv-auth \ plsql.gateway.mode: proxied db.username: ORDS_PUBLIC_USER db.secret: - secretName: multi-ords-auth-enc" | kubectl apply -f - + secretName: multi-ords-auth-enc ``` latest container-registry.oracle.com/database/ords version, **24.1.1**, valid as of **30-May-2024** - + +1. Apply the yaml file: + ```bash + kubectl apply -f ords-multi-pool.yaml + ``` + 1. 
Watch the ordssrvs resource until the status is **Healthy**: ```bash kubectl get OrdsSrvs ords-multi-pool -n ordsnamespace -w diff --git a/docs/ordsservices/examples/ordsnamespace-role-binding.yaml b/docs/ordsservices/examples/ordsnamespace-role-binding.yaml new file mode 100644 index 00000000..018d8934 --- /dev/null +++ b/docs/ordsservices/examples/ordsnamespace-role-binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ordsnamespace-oracle-database-operator-manager-rolebinding + namespace: ordsnamespace +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: oracle-database-operator-manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: oracle-database-operator-system From 02a3474c5a39586a7adba72b4a0c05fa09c28309 Mon Sep 17 00:00:00 2001 From: marcstef Date: Fri, 9 May 2025 13:05:16 +0000 Subject: [PATCH 23/24] ORDSSRVS role-binding example --- docs/ordsservices/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ordsservices/README.md b/docs/ordsservices/README.md index 57195120..e2fa97be 100644 --- a/docs/ordsservices/README.md +++ b/docs/ordsservices/README.md @@ -49,7 +49,7 @@ Oracle Database Version: kubectl create namespace ordsnamespace ``` - Apply namespace role binding [ordsnamespace-role-binding.yaml](./ordsnamespace-role-binding.yaml): + Apply namespace role binding [ordsnamespace-role-binding.yaml](./examples/ordsnamespace-role-binding.yaml): ```bash kubectl apply -f ordsnamespace-role-binding.yaml ``` From e69e368b984f943205481b19f727d4bdc8a7bbdc Mon Sep 17 00:00:00 2001 From: marcstef Date: Mon, 12 May 2025 13:57:00 +0000 Subject: [PATCH 24/24] ORDSSRVS mainpage README --- README.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/README.md b/README.md index 7afa79e8..936ae23a 100644 --- a/README.md +++ b/README.md @@ -189,10 +189,7 @@ The following quickstarts are designed for specific database 
configurations: * [Containerized Oracle Globally Distributed Database](./docs/sharding/README.md) * [Oracle Multitenant Database](./docs/multitenant/README.md) * [Oracle Base Database Service (OBDS)](./docs/dbcs/README.md) - - -The following quickstart is designed for non-database configurations: -* [Oracle Database Observability](./docs/observability/README.md) +* [ORDS Services (ORDSSRVS)](./docs/ordsservices/README.md) The following quickstart is designed for non-database configurations: