diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000..dfdb8b771
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.sh text eol=lf
diff --git a/.github/actions/createPostgresqlFlexibleServer/action.yml b/.github/actions/createPostgresqlFlexibleServer/action.yml
new file mode 100644
index 000000000..4e88d3da7
--- /dev/null
+++ b/.github/actions/createPostgresqlFlexibleServer/action.yml
@@ -0,0 +1,62 @@
+name: Create PostgreSQL Flexible Server
+description: Create PostgreSQL Flexible Server that allows access from Azure services.
+inputs:
+ dbAdminUser:
+ description: "Database Admin User"
+ required: true
+ dbName:
+ description: "Database Server Name (used as the name of the PostgreSQL Flexible Server instance)"
+ required: true
+ dbPassword:
+ description: "Database Password"
+ required: true
+ dbServerName:
+ description: "Database Name (used as the name of the database created on the server)"
+ required: true
+ location:
+ description: "Location"
+ required: true
+ resourceGroupName:
+ description: "Resource Group Name"
+ required: true
+
+runs:
+ using: "composite"
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set azCliVersion
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - name: Set Up Azure Postgresql that allows access from Azure services
+ id: setup-postgresql
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "Deploy DB with name ${{ inputs.dbName }}"
+ az postgres flexible-server create \
+ --resource-group ${{ inputs.resourceGroupName }} \
+ --name ${{ inputs.dbName }} \
+ --location ${{ inputs.location }} \
+ --admin-user ${{ inputs.dbAdminUser }} \
+ --admin-password ${{ inputs.dbPassword }} \
+ --version 16 \
+ --public-access 0.0.0.0 \
+ --tier Burstable \
+ --sku-name Standard_B1ms \
+ --yes
+
+ az postgres flexible-server db create \
+ --resource-group ${{ inputs.resourceGroupName }} \
+ --server-name ${{ inputs.dbName }} \
+ --database-name ${{ inputs.dbServerName }}
+
+ sleep 1m
+ echo "Allow Access To Azure Services"
+ az postgres flexible-server firewall-rule create \
+ -g ${{ inputs.resourceGroupName }} \
+ -n ${{ inputs.dbName }} \
+ -r "AllowAllAzureServices" \
+ --start-ip-address "0.0.0.0" \
+ --end-ip-address "0.0.0.0"
diff --git a/.github/actions/database-parameters/action.yaml b/.github/actions/database-parameters/action.yaml
new file mode 100644
index 000000000..53af62005
--- /dev/null
+++ b/.github/actions/database-parameters/action.yaml
@@ -0,0 +1,143 @@
+name: Get Database parameters
+description: Get Database parameters
+
+inputs:
+ databaseType:
+ description: "databaseType"
+ required: true
+ uamiId:
+ description: "uamiId"
+ required: true
+ serverHost:
+ description: "serverHost"
+ required: true
+ dbInstanceName:
+ description: "dbInstanceName"
+ required: true
+ dbAdminUser:
+ description: "dbAdminUser"
+ required: false
+ default: "testuser"
+ databaseName:
+ description: "databaseName"
+ required: false
+ default: "testdb"
+
+outputs:
+ enableDB:
+ description: "enableDB"
+ value: ${{ steps.database-parameters.outputs.enableDB }}
+ enablePswlessConnection:
+ description: "enablePswlessConnection"
+ value: ${{ steps.database-parameters.outputs.enablePswlessConnection }}
+ databaseType:
+ description: "databaseType"
+ value: ${{ steps.database-parameters.outputs.databaseType }}
+ dsConnectionURL:
+ description: "dsConnectionURL"
+ value: ${{ steps.database-parameters.outputs.dsConnectionURL }}
+ dbUser:
+ description: "dbUser"
+ value: ${{ steps.database-parameters.outputs.dbUser }}
+ dbIdentity:
+ description: "dbIdentity"
+ value: ${{ steps.database-parameters.outputs.dbIdentity }}
+
+runs:
+ using: "composite"
+ steps:
+ - name: Set up environment variables
+ shell: bash
+ id: database-parameters
+ run: |
+ enableDB=false
+ databaseType=sqlserver
+ dsConnectionURL=jdbc:postgresql://contoso.postgres.database:5432/${{ inputs.databaseName }}
+ dbUser=contosoDbUser
+ enablePswlessConnection=false
+ dbIdentity={}
+ serverHost=${{ inputs.serverHost }}
+ uamiId=${{ inputs.uamiId }}
+ echo "databaseType: ${{ inputs.databaseType }}"
+ echo "serverHost : $serverHost"
+ if ${{ inputs.databaseType == 'mssqlserver' }}; then
+ echo "Using mssqlserver database type"
+ enableDB=true
+ databaseType=sqlserver
+ dsConnectionURL="jdbc:sqlserver://$serverHost:1433;database=${{ inputs.databaseName }}"
+ dbUser=${{ inputs.dbAdminUser }}@${{ inputs.dbInstanceName }}
+ elif ${{ inputs.databaseType == 'mssqlserver-passwordless' }}; then
+ echo "Using mssqlserver-passwordless database type"
+ enableDB=true
+ enablePswlessConnection=true
+ databaseType=sqlserver
+ dsConnectionURL="jdbc:sqlserver://$serverHost:1433;database=${{ inputs.databaseName }}"
+ dbUser=${{ inputs.dbAdminUser }}@${{ inputs.dbInstanceName }}
+ dbIdentity=$(jq -n \
+ --arg uamiId "$uamiId" \
+ '{
+ "type": "UserAssigned",
+ "userAssignedIdentities": {
+ ($uamiId): {}
+ }
+ }'| jq -c '.')
+ elif ${{ inputs.databaseType == 'oracle' }}; then
+ echo "Using oracle database type"
+ enableDB=true
+ databaseType=oracle
+ dsConnectionURL=jdbc:oracle:thin:@${serverHost}:1521/oratest1
+ dbUser=${{ inputs.dbAdminUser }}
+ elif ${{ inputs.databaseType == 'mysql(flexible)' }}; then
+ echo "Using mysql(flexible) database type"
+ enableDB=true
+ databaseType=mysql
+ dsConnectionURL=jdbc:mysql://$serverHost:3306/${{ inputs.databaseName }}?sslMode=REQUIRED
+ dbUser=${{ inputs.dbAdminUser }}
+ elif ${{ inputs.databaseType == 'mysql-passwordless(flexible)' }}; then
+ echo "Using mysql-passwordless(flexible) database type"
+ enableDB=true
+ enablePswlessConnection=true
+ dbUser=$(basename "$uamiId")
+ dbIdentity=$(jq -n \
+ --arg uamiId "$uamiId" \
+ '{
+ "type": "UserAssigned",
+ "userAssignedIdentities": {
+ ($uamiId): {}
+ }
+ }'| jq -c '.' )
+ databaseType=mysql
+ dsConnectionURL=jdbc:mysql://$serverHost:3306/${{ inputs.databaseName }}
+ elif ${{ inputs.databaseType == 'postgresql(flexible)' }}; then
+ echo "Using postgresql(flexible) database type"
+ enableDB=true
+ databaseType=postgresql
+ dsConnectionURL="jdbc:postgresql://$serverHost:5432/${{ inputs.databaseName }}"
+ dbUser=${{ inputs.dbAdminUser }}
+ elif ${{ inputs.databaseType == 'postgresql-passwordless(flexible)' }}; then
+ echo "Using postgresql-passwordless(flexible) database type"
+ enableDB=true
+ enablePswlessConnection=true
+ dbUser=$(basename "$uamiId")
+ dbIdentity=$(jq -n \
+ --arg uamiId "$uamiId" \
+ '{
+ "type": "UserAssigned",
+ "userAssignedIdentities": {
+ ($uamiId): {}
+ }
+ }'| jq -c '.')
+ databaseType=postgresql
+ dsConnectionURL="jdbc:postgresql://$serverHost:5432/${{ inputs.databaseName }}"
+ fi
+
+ echo "enableDB=$enableDB" >> "$GITHUB_OUTPUT"
+ echo "enablePswlessConnection=$enablePswlessConnection" >> "$GITHUB_OUTPUT"
+ echo "databaseType=$databaseType" >> "$GITHUB_OUTPUT"
+ echo "dsConnectionURL=$dsConnectionURL" >> "$GITHUB_OUTPUT"
+ echo "dbUser=$dbUser" >> "$GITHUB_OUTPUT"
+ echo "dbIdentity=$dbIdentity" >> "$GITHUB_OUTPUT"
+ echo "dsConnectionURL=$dsConnectionURL"
+ echo "dbUser=$dbUser"
+ echo "Database parameters set successfully"
+
diff --git a/.github/actions/database-provision/action.yaml b/.github/actions/database-provision/action.yaml
new file mode 100644
index 000000000..582908a84
--- /dev/null
+++ b/.github/actions/database-provision/action.yaml
@@ -0,0 +1,287 @@
+name: Database provision
+description: Database provision
+
+inputs:
+ databaseType:
+ description: "databaseType"
+ required: true
+ resourceGroup:
+ description: "resourceGroup"
+ required: true
+ uamiName:
+ description: "uamiName"
+ required: true
+ location:
+ description: "location"
+ required: true
+ dbInstanceName:
+ description: "dbInstanceName"
+ required: true
+ dbPassword:
+ description: "dbPassword"
+ required: true
+ dbAdminUser:
+ description: "dbAdminUser"
+ required: false
+ default: "testuser"
+ databaseName:
+ description: "databaseName"
+ required: false
+ default: "testdb"
+
+outputs:
+ serverHost:
+ description: "serverHost"
+ value: ${{ steps.resource_outputs.outputs.serverHost }}
+ uamiId:
+ description: "uamiId"
+ value: ${{ steps.resource_outputs.outputs.uamiId }}
+
+runs:
+ using: "composite"
+ steps:
+ # 01-Deploy an instance of Azure SQL Database
+ - name: Echo inputs
+ shell: bash
+ run: |
+ echo "resourceGroup=${{ inputs.resourceGroup }}"
+ echo "uamiName=${{ inputs.uamiName }}"
+ echo "location=${{ inputs.location }}"
+ echo "dbInstanceName=${{ inputs.dbInstanceName }}"
+ echo "dbAdminUser=${{ inputs.dbAdminUser }}"
+ echo "databaseName=${{ inputs.databaseName }}"
+ - name: Deploy an instance of Azure SQL Database
+ id: deploy-mssqlserver
+ if: ${{ inputs.databaseType == 'mssqlserver' }}
+ shell: bash
+ run: |
+ az sql server create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --admin-user ${{ inputs.dbAdminUser }} --admin-password ${{ inputs.dbPassword }} \
+ --location ${{ inputs.location }}
+ host=$(az sql server show \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --query "fullyQualifiedDomainName" -o tsv)
+ # Allow Azure services to access
+ az sql server firewall-rule create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --server ${{ inputs.dbInstanceName }} \
+ --name "AllowAllAzureIps" --start-ip-address 0.0.0.0 --end-ip-address 0.0.0.0
+ az sql db create --resource-group ${{ inputs.resourceGroup }} \
+ --server ${{ inputs.dbInstanceName }} \
+ --name ${{ inputs.databaseName }}
+
+ echo "serverHost=${host}" >> "$GITHUB_ENV"
+
+ - name: Deploy an instance of Azure SQL passwordless Database
+ id: deploy-mssqlserver-passwordless
+ if: ${{ inputs.databaseType == 'mssqlserver-passwordless' }}
+ shell: bash
+ run: |
+ # Create a user-assigned managed identity
+ az identity create --name ${{ inputs.uamiName }} --resource-group ${{ inputs.resourceGroup }}
+ # Export the resource ID of the user-assigned managed identity as an environment variable
+ uamiId=$(az identity show --name ${{ inputs.uamiName }} --resource-group ${{ inputs.resourceGroup }} --query id -o tsv)
+ objectId=$(az identity show --name ${{ inputs.uamiName }} --resource-group ${{ inputs.resourceGroup }} --query principalId -o tsv)
+
+ az sql server create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --admin-user ${{ inputs.dbAdminUser }} \
+ --admin-password ${{ inputs.dbPassword }} \
+ --assign-identity \
+ --external-admin-principal-type Application \
+ --external-admin-name ${{ inputs.uamiName }} \
+ --external-admin-sid $objectId \
+ --location ${{ inputs.location }}
+ host=$(az sql server show \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --query "fullyQualifiedDomainName" -o tsv)
+ # Allow Azure services to access
+ az sql server firewall-rule create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --server ${{ inputs.dbInstanceName }} \
+ --name "AllowAllAzureIps" --start-ip-address 0.0.0.0 --end-ip-address 0.0.0.0
+ az sql db create --resource-group ${{ inputs.resourceGroup }} \
+ --server ${{ inputs.dbInstanceName }} \
+ --name ${{ inputs.databaseName }}
+
+ echo "serverHost=${host}" >> "$GITHUB_ENV"
+ echo "uamiId=${uamiId}" >> "$GITHUB_ENV"
+
+ - name: Deploy an instance of Azure Database for MySQL
+ id: deploy-mysql
+ if: ${{ inputs.databaseType == 'mysql(flexible)' }}
+ shell: bash
+ run: |
+ az mysql flexible-server create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --admin-user myadmin \
+ --admin-password ${{ inputs.dbPassword }} \
+ --sku-name Standard_B1ms \
+ --location ${{ inputs.location }} \
+ --version 8.0.21 \
+ --yes
+
+ # Allow Azure services to access
+ az mysql flexible-server firewall-rule create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --rule-name "AllowAllAzureIps" \
+ --start-ip-address 0.0.0.0 \
+ --end-ip-address 0.0.0.0
+ # Allow current IP to access MySQL server
+ currentIp=$(curl -s https://icanhazip.com)
+ az mysql flexible-server firewall-rule create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --rule-name "AllowCurrentIp" \
+ --start-ip-address ${currentIp} \
+ --end-ip-address ${currentIp}
+ host=$(az mysql flexible-server show \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --query "fullyQualifiedDomainName" -o tsv)
+
+ wget --no-check-certificate https://dl.cacerts.digicert.com/DigiCertGlobalRootCA.crt.pem
+ mysql -h $host -u myadmin -p${{ inputs.dbPassword }} --ssl-ca=DigiCertGlobalRootCA.crt.pem << EOF
+ CREATE DATABASE ${{ inputs.databaseName }};
+ CREATE USER '${{ inputs.dbAdminUser }}'@'%' IDENTIFIED BY '${{ inputs.dbPassword }}';
+ GRANT ALL PRIVILEGES ON ${{ inputs.databaseName }} . * TO '${{ inputs.dbAdminUser }}'@'%';
+ FLUSH PRIVILEGES;
+ EOF
+
+ echo "serverHost=${host}" >> "$GITHUB_ENV"
+
+ - name: Deploy an instance of Azure Database for MySQL passwordless
+ id: deploy-mysql-passwordless
+ if: ${{ inputs.databaseType == 'mysql-passwordless(flexible)' }}
+ shell: bash
+ run: |
+ az mysql flexible-server create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --admin-user myadmin \
+ --admin-password ${{ inputs.dbPassword }} \
+ --sku-name Standard_B1ms \
+ --location ${{ inputs.location }} \
+ --version 8.0.21 \
+ --yes
+
+ # Allow Azure services to access
+ az mysql flexible-server firewall-rule create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --rule-name "AllowAllAzureIps" \
+ --start-ip-address 0.0.0.0 \
+ --end-ip-address 0.0.0.0
+ # Allow current IP to access MySQL server
+ currentIp=$(curl -s https://icanhazip.com)
+ az mysql flexible-server firewall-rule create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --rule-name "AllowCurrentIp" \
+ --start-ip-address ${currentIp} \
+ --end-ip-address ${currentIp}
+ host=$(az mysql flexible-server show \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --query "fullyQualifiedDomainName" -o tsv)
+
+ wget --no-check-certificate https://dl.cacerts.digicert.com/DigiCertGlobalRootCA.crt.pem
+ mysql -h $host -u myadmin -p${{ inputs.dbPassword }} --ssl-ca=DigiCertGlobalRootCA.crt.pem << EOF
+ CREATE DATABASE ${{ inputs.databaseName }};
+ CREATE USER '${{ inputs.dbAdminUser }}'@'%' IDENTIFIED BY '${{ inputs.dbPassword }}';
+ GRANT ALL PRIVILEGES ON ${{ inputs.databaseName }} . * TO '${{ inputs.dbAdminUser }}'@'%';
+ FLUSH PRIVILEGES;
+ EOF
+
+ # Create a user-assigned managed identity
+ az identity create --name ${{ inputs.uamiName }} --resource-group ${{ inputs.resourceGroup }}
+ # Export the resource ID of the user-assigned managed identity as an environment variable
+ uamiId=$(az identity show --name ${{ inputs.uamiName }} --resource-group ${{ inputs.resourceGroup }} --query id -o tsv)
+ objectId=$(az identity show --name ${{ inputs.uamiName }} --resource-group ${{ inputs.resourceGroup }} --query principalId -o tsv)
+
+ az mysql flexible-server ad-admin create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --server-name ${{ inputs.dbInstanceName }} \
+ --display-name ${{ inputs.uamiName }} \
+ --object-id $objectId \
+ --identity $uamiId
+
+ echo "serverHost=${host}" >> "$GITHUB_ENV"
+ echo "uamiId=${uamiId}" >> "$GITHUB_ENV"
+
+ - name: Deploy an instance of Azure Database for PostgreSQL
+ id: deploy-postgresql
+ if: ${{ inputs.databaseType == 'postgresql(flexible)' }}
+ shell: bash
+ run: |
+ az postgres flexible-server create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --admin-user ${{ inputs.dbAdminUser }} --admin-password ${{ inputs.dbPassword }} \
+ --public-access 0.0.0.0 \
+ --location ${{ inputs.location }} \
+ --yes
+ echo "Get the host name of the PostgreSQL server"
+ host=$(az postgres flexible-server show \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --query "fullyQualifiedDomainName" -o tsv)
+ echo "Create a database in the PostgreSQL server"
+ az postgres flexible-server db create --resource-group ${{ inputs.resourceGroup }} \
+ --server-name ${{ inputs.dbInstanceName }} \
+ --database-name ${{ inputs.databaseName }}
+
+ echo "serverHost=${host}" >> "$GITHUB_ENV"
+ - name: Deploy an instance of Azure Database for PostgreSQL passwordless
+ if: ${{ inputs.databaseType == 'postgresql-passwordless(flexible)' }}
+ id: deploy-postgresql-passwordless
+ shell: bash
+ run: |
+
+ # Create a user-assigned managed identity
+ az identity create --name ${{ inputs.uamiName }} --resource-group ${{ inputs.resourceGroup }}
+ # Export the resource ID of the user-assigned managed identity as an environment variable
+ uamiId=$(az identity show --name ${{ inputs.uamiName }} --resource-group ${{ inputs.resourceGroup }} --query id -o tsv)
+ objectId=$(az identity show --name ${{ inputs.uamiName }} --resource-group ${{ inputs.resourceGroup }} --query principalId -o tsv)
+
+ az postgres flexible-server create \
+ --microsoft-entra-auth Enabled \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --public-access 0.0.0.0 \
+ --location ${{ inputs.location }} \
+ --yes
+ echo "Set the user-assigned managed identity as the Microsoft Entra admin for the PostgreSQL server"
+ sleep 120 # Wait for the server to be ready
+ az postgres flexible-server microsoft-entra-admin create \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --server-name ${{ inputs.dbInstanceName }} \
+ --display-name ${{ inputs.uamiName }} \
+ --object-id $objectId \
+ --type ServicePrincipal
+ echo "Get the host name of the PostgreSQL server"
+ host=$(az postgres flexible-server show \
+ --resource-group ${{ inputs.resourceGroup }} \
+ --name ${{ inputs.dbInstanceName }} \
+ --query "fullyQualifiedDomainName" -o tsv)
+ echo "Create a database in the PostgreSQL server"
+ az postgres flexible-server db create --resource-group ${{ inputs.resourceGroup }} \
+ --server-name ${{ inputs.dbInstanceName }} \
+ --database-name ${{ inputs.databaseName }}
+
+ echo "serverHost=${host}" >> "$GITHUB_ENV"
+ echo "uamiId=${uamiId}" >> "$GITHUB_ENV"
+
+ - name: Set outputs
+ id: resource_outputs
+ shell: bash
+ run: |
+ echo "uamiId=${{ env.uamiId }}" >> "$GITHUB_OUTPUT"
+ echo "serverHost=${{ env.serverHost }}" >> "$GITHUB_OUTPUT"
diff --git a/.github/actions/it/action.yml b/.github/actions/it/action.yml
new file mode 100644
index 000000000..c7774892f
--- /dev/null
+++ b/.github/actions/it/action.yml
@@ -0,0 +1,676 @@
+name: 'IT Validation Workflows'
+description: 'Execute validation workflows based on a validation plan'
+inputs:
+ it_file:
+ description: 'Path to the validation plan file'
+ required: true
+ github_token:
+ description: 'GitHub token for API access'
+ required: true
+ default: ${{ github.token }}
+
+outputs:
+ results:
+ description: 'JSON string containing the results of all workflow executions'
+ value: ${{ steps.collect-results.outputs.results }}
+ report_timestamp:
+ description: 'Timestamp of the generated report'
+ value: ${{ steps.generate-report.outputs.timestamp }}
+ report_url:
+ description: 'URL to the generated report'
+ value: ${{ steps.output-urls.outputs.report_url }}
+
+runs:
+ using: 'composite'
+ steps:
+ - name: Read validation plan
+ id: set-matrix
+ shell: bash
+ run: |
+ PLAN_FILE="${{ inputs.it_file }}"
+ echo "Looking for plan file: $PLAN_FILE"
+
+ if [ ! -f "$PLAN_FILE" ]; then
+ echo "Error: Plan file $PLAN_FILE not found"
+ echo "Current working directory: $(pwd)"
+ echo "GITHUB_WORKSPACE: $GITHUB_WORKSPACE"
+ echo "Listing current directory:"
+ ls -la
+ exit 1
+ fi
+
+ echo "Successfully found plan file: $PLAN_FILE"
+
+ # Create matrix from plan
+ MATRIX=$(jq -c '.validation_scenarios | map({
+ workflow: .workflow,
+ run_mode: (.run_mode // "parallel"),
+ scenarios: .scenarios
+ })' "$PLAN_FILE")
+
+ echo "matrix=$MATRIX" >> $GITHUB_OUTPUT
+
+ - name: Execute validation workflows
+ id: execute-workflows
+ uses: actions/github-script@v7
+ env:
+ GITHUB_TOKEN: ${{ inputs.github_token }}
+ with:
+ script: |
+ const matrix = ${{ steps.set-matrix.outputs.matrix }};
+ const allWorkflowRuns = [];
+
+ for (const item of matrix) {
+ const workflow = item.workflow;
+ const runMode = item.run_mode;
+ const scenarios = item.scenarios;
+ const workflowRuns = [];
+
+ console.log(`Starting to trigger workflow: ${workflow}`);
+ console.log(`Run mode: ${runMode}`);
+ console.log(`Number of scenarios to process: ${scenarios.length}`);
+ console.log(`Current owner: ${context.repo.owner}`);
+ console.log(`Current repo: ${context.repo.repo}`);
+
+ if (runMode === 'serial') {
+ console.log('Running scenarios in serial mode');
+
+ for (const scenario of scenarios) {
+ try {
+ const scenarioName = scenario.scenario;
+ const scenarioInputs = scenario.inputs;
+
+ console.log(`Triggering ${workflow} with scenario "${scenarioName}":`, JSON.stringify(scenarioInputs, null, 2));
+
+ // The github rest api for workflow dispatch requires all inputs to be strings.
+ // We need to convert any object values to JSON strings.
+ const inputs = Object.fromEntries(
+ Object.entries(scenarioInputs).map(([key, value]) => {
+ if (typeof value === 'object' && value !== null) {
+ return [key, JSON.stringify(value)];
+ }
+ return [key, value];
+ })
+ );
+
+ // Trigger the workflow
+ const dispatchResponse = await github.rest.actions.createWorkflowDispatch({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ workflow_id: workflow,
+ ref: context.ref,
+ inputs: inputs
+ });
+
+ console.log(`Workflow dispatch response:`, JSON.stringify(dispatchResponse.data, null, 2));
+
+ // Wait for 5 seconds for the workflow to be created
+ console.log('Waiting 5 seconds for workflow to be created...');
+ await new Promise(resolve => setTimeout(resolve, 5000));
+
+ // Get the latest workflow run
+ let attempts = 0;
+ const maxAttempts = 5;
+ let run = null;
+
+ while (attempts < maxAttempts) {
+ console.log(`Attempt ${attempts + 1} to find workflow run...`);
+ const runs = await github.rest.actions.listWorkflowRuns({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ workflow_id: workflow
+ });
+
+ if (runs.data.workflow_runs && runs.data.workflow_runs.length > 0) {
+ const potentialRun = runs.data.workflow_runs[0];
+ // Get detailed run information
+ const runDetails = await github.rest.actions.getWorkflowRun({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: potentialRun.id
+ });
+
+ console.log(`Found workflow run:`, JSON.stringify({
+ id: runDetails.data.id,
+ status: runDetails.data.status,
+ created_at: runDetails.data.created_at,
+ head_branch: runDetails.data.head_branch,
+ html_url: runDetails.data.html_url
+ }, null, 2));
+
+ run = runDetails.data;
+ break;
+ }
+
+ console.log('No matching workflow run found, waiting 5 seconds...');
+ await new Promise(resolve => setTimeout(resolve, 5000));
+ attempts++;
+ }
+
+ if (!run) {
+ console.log('Failed to find workflow run after all attempts');
+ continue;
+ }
+
+ // Wait for this workflow to complete before triggering the next one
+ console.log(`Waiting for workflow run ${run.id} to complete...`);
+ let status = run.status;
+ let waitAttempts = 0;
+ const maxWaitAttempts = 90; // 90 minutes maximum wait time
+
+ while (status !== 'completed' && waitAttempts < maxWaitAttempts) {
+ // Wait for 60 seconds between checks
+ await new Promise(resolve => setTimeout(resolve, 60000));
+
+ // Get the workflow run status
+ const runData = await github.rest.actions.getWorkflowRun({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: run.id
+ });
+
+ status = runData.data.status;
+ console.log(`Workflow ${workflow} status: ${status} (wait attempt ${waitAttempts + 1}/${maxWaitAttempts})`);
+
+ // If the workflow is still queued or in progress, continue waiting
+ if (status === 'queued' || status === 'in_progress') {
+ waitAttempts++;
+ continue;
+ }
+
+ // If the workflow is completed, break the loop
+ if (status === 'completed') {
+ console.log(`Workflow ${workflow} completed with conclusion: ${runData.data.conclusion}`);
+ break;
+ }
+
+ // If we get here, the status is something unexpected
+ console.log(`Unexpected status for workflow ${workflow}: ${status}`);
+ waitAttempts++;
+ }
+
+ if (status !== 'completed') {
+ console.log(`Workflow ${workflow} did not complete within the maximum wait time. Moving to next scenario.`);
+ }
+
+ workflowRuns.push({
+ workflow: workflow,
+ scenario: scenarioInputs,
+ scenarioName: scenarioName,
+ runId: run.id,
+ runUrl: run.html_url,
+ startTime: run.created_at
+ });
+
+ console.log(`Successfully processed workflow run: ${run.id}`);
+ } catch (error) {
+ console.error(`Error processing scenario:`, error);
+ console.error(`Error details:`, JSON.stringify(error, null, 2));
+ }
+ }
+ } else {
+ console.log('Running scenarios in parallel mode');
+
+ for (const scenario of scenarios) {
+ try {
+ const scenarioName = scenario.scenario;
+ const scenarioInputs = scenario.inputs;
+
+ console.log(`Triggering ${workflow} with scenario "${scenarioName}":`, JSON.stringify(scenarioInputs, null, 2));
+
+ // The github rest api for workflow dispatch requires all inputs to be strings.
+ // We need to convert any object values to JSON strings.
+ const inputs = Object.fromEntries(
+ Object.entries(scenarioInputs).map(([key, value]) => {
+ if (typeof value === 'object' && value !== null) {
+ return [key, JSON.stringify(value)];
+ }
+ return [key, value];
+ })
+ );
+
+ // Trigger the workflow
+ const dispatchResponse = await github.rest.actions.createWorkflowDispatch({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ workflow_id: workflow,
+ ref: context.ref,
+ inputs: inputs
+ });
+
+ console.log(`Workflow dispatch response:`, JSON.stringify(dispatchResponse.data, null, 2));
+
+ // Wait for 5 seconds for the workflow to be created
+ console.log('Waiting 5 seconds for workflow to be created...');
+ await new Promise(resolve => setTimeout(resolve, 5000));
+
+ // Get the latest workflow run
+ let attempts = 0;
+ const maxAttempts = 5;
+ let run = null;
+
+ while (attempts < maxAttempts) {
+ console.log(`Attempt ${attempts + 1} to find workflow run...`);
+ const runs = await github.rest.actions.listWorkflowRuns({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ workflow_id: workflow
+ });
+
+ if (runs.data.workflow_runs && runs.data.workflow_runs.length > 0) {
+ const potentialRun = runs.data.workflow_runs[0];
+ // Get detailed run information
+ const runDetails = await github.rest.actions.getWorkflowRun({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: potentialRun.id
+ });
+
+ console.log(`Found workflow run:`, JSON.stringify({
+ id: runDetails.data.id,
+ status: runDetails.data.status,
+ created_at: runDetails.data.created_at,
+ head_branch: runDetails.data.head_branch,
+ html_url: runDetails.data.html_url
+ }, null, 2));
+
+ run = runDetails.data;
+ break;
+ }
+
+ console.log('No matching workflow run found, waiting 5 seconds...');
+ await new Promise(resolve => setTimeout(resolve, 5000));
+ attempts++;
+ }
+
+ if (!run) {
+ console.log('Failed to find workflow run after all attempts');
+ continue;
+ }
+
+ workflowRuns.push({
+ workflow: workflow,
+ scenario: scenarioInputs,
+ scenarioName: scenarioName,
+ runId: run.id,
+ runUrl: run.html_url,
+ startTime: run.created_at
+ });
+
+ console.log(`Successfully tracked workflow run: ${run.id}`);
+ } catch (error) {
+ console.error(`Error processing scenario:`, error);
+ console.error(`Error details:`, JSON.stringify(error, null, 2));
+ }
+ }
+ }
+
+ allWorkflowRuns.push(...workflowRuns);
+ }
+
+ console.log(`Total workflow runs tracked: ${allWorkflowRuns.length}`);
+ console.log('Workflow runs:', JSON.stringify(allWorkflowRuns, null, 2));
+
+ core.setOutput('workflow_runs', JSON.stringify(allWorkflowRuns));
+
+ - name: Wait for workflows and collect results
+ id: collect-results
+ uses: actions/github-script@v7
+ env:
+ GITHUB_TOKEN: ${{ inputs.github_token }}
+ with:
+ script: |
+ const workflowRuns = ${{ steps.execute-workflows.outputs.workflow_runs }};
+ const results = [];
+
+ for (const run of workflowRuns) {
+ console.log(`Processing workflow ${run.workflow} run ${run.runId}...`);
+
+ // Get the current workflow run status
+ const runData = await github.rest.actions.getWorkflowRun({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: run.runId
+ });
+
+ let status = runData.data.status;
+
+ // If the workflow is already completed (likely from serial execution), use it directly
+ if (status === 'completed') {
+ console.log(`Workflow ${run.workflow} is already completed with conclusion: ${runData.data.conclusion}`);
+ results.push({
+ workflow: run.workflow,
+ scenario: run.scenario,
+ scenarioName: run.scenarioName,
+ status: runData.data.conclusion,
+ runId: run.runId,
+ runUrl: run.runUrl,
+ startTime: run.startTime,
+ endTime: runData.data.updated_at
+ });
+ continue;
+ }
+
+ // For workflows that are still running (parallel mode), wait for completion
+ console.log(`Waiting for workflow ${run.workflow} run ${run.runId}...`);
+
+ let attempts = 0;
+ const maxAttempts = 60; // 60 minutes maximum wait time
+
+ while (status !== 'completed' && attempts < maxAttempts) {
+ try {
+ // Wait for 60 seconds between checks
+ await new Promise(resolve => setTimeout(resolve, 60000));
+
+ // Get the workflow run status
+ const runData = await github.rest.actions.getWorkflowRun({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: run.runId
+ });
+
+ status = runData.data.status;
+ console.log(`Workflow ${run.workflow} status: ${status} (attempt ${attempts + 1}/${maxAttempts})`);
+
+ // If the workflow is still queued or in progress, continue waiting
+ if (status === 'queued' || status === 'in_progress') {
+ attempts++;
+ continue;
+ }
+
+ // If the workflow is completed, break the loop
+ if (status === 'completed') {
+ break;
+ }
+
+ // If we get here, the status is something unexpected
+ console.log(`Unexpected status for workflow ${run.workflow}: ${status}`);
+ attempts++;
+ } catch (error) {
+ console.log(`Error checking workflow status: ${error.message}`);
+ attempts++;
+ }
+ }
+
+ if (status !== 'completed') {
+ console.log(`Workflow ${run.workflow} did not complete within the maximum wait time`);
+ results.push({
+ workflow: run.workflow,
+ scenario: run.scenario,
+ scenarioName: run.scenarioName,
+ status: 'timeout',
+ runId: run.runId,
+ runUrl: run.runUrl,
+ startTime: run.startTime,
+ endTime: new Date().toISOString()
+ });
+ continue;
+ }
+
+ // Get the final workflow run data
+ const finalRunData = await github.rest.actions.getWorkflowRun({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: run.runId
+ });
+
+ results.push({
+ workflow: run.workflow,
+ scenario: run.scenario,
+ scenarioName: run.scenarioName,
+ status: finalRunData.data.conclusion,
+ runId: run.runId,
+ runUrl: run.runUrl,
+ startTime: run.startTime,
+ endTime: finalRunData.data.updated_at
+ });
+ }
+
+ core.setOutput('results', JSON.stringify(results));
+
+ - name: Generate Markdown report
+ id: generate-report
+ shell: bash
+ run: |
+ TIMESTAMP=$(date +"%Y-%m-%d-%H-%M-%S")
+ echo "timestamp=$TIMESTAMP" >> $GITHUB_OUTPUT
+ echo "Current directory: $(pwd)"
+
+ # Generate table rows and summary
+ RESULTS='${{ steps.collect-results.outputs.results }}'
+ rows=""
+ total=0
+ success=0
+ failure=0
+ timeout=0
+ cancelled=0
+ other_failed=0
+ for row in $(echo "$RESULTS" | jq -r '.[] | @base64'); do
+ _jq() {
+ echo ${row} | base64 --decode | jq -r "$1"
+ }
+ workflow=$(_jq '.workflow')
+ scenario_name=$(_jq '.scenarioName // empty')
+ scenario=$(_jq '.scenario | if type=="object" then to_entries | map("\(.key): \(.value)") | join(", ") else tostring end')
+ status=$(_jq '.status')
+ runUrl=$(_jq '.runUrl')
+ startTime=$(_jq '.startTime')
+ endTime=$(_jq '.endTime')
+
+ # Use scenario name if available, otherwise use the scenario details
+ display_scenario="${scenario_name:-${scenario}}"
+
+ # Calculate duration
+ start=$(date -d "$startTime" +%s)
+ end=$(date -d "$endTime" +%s)
+ duration=$((end - start))
+ duration_str=$(printf '%dh:%dm:%ds' $((duration/3600)) $((duration%3600/60)) $((duration%60)))
+
+ # Count status
+ total=$((total+1))
+ if [ "$status" = "success" ]; then
+ success=$((success+1))
+ elif [ "$status" = "failure" ]; then
+ failure=$((failure+1))
+ elif [ "$status" = "timeout" ]; then
+ timeout=$((timeout+1))
+ elif [ "$status" = "cancelled" ]; then
+ cancelled=$((cancelled+1))
+ else
+ # Any other non-success status
+ other_failed=$((other_failed+1))
+ fi
+
+ # Create table row
+ rows+="| ${workflow} | \`${display_scenario}\` | ${status} | ${duration_str} | [View Run](${runUrl}) |\n"
+ done
+
+ # Create a temporary file with the content
+ {
+ echo "# Validation Workflow Results"
+ echo ""
+ echo "## Summary"
+ echo "- Total Workflows: ${total}"
+ echo "- Successful: ${success}"
+ echo "- Failed: ${failure}"
+ echo "- Timed Out: ${timeout}"
+ echo "- Cancelled: ${cancelled}"
+ echo "- Other Failed: ${other_failed}"
+ echo ""
+ echo "## Detailed Results"
+ echo ""
+ echo "| Workflow | Scenario | Status | Duration | Run URL |"
+ echo "|----------|----------|---------|-----------|----------|"
+ echo -e "${rows}"
+ echo ""
+ echo "## Execution Notes"
+ echo "- Workflows marked with \`run_mode: serial\` are executed one after another"
+ echo "- Other workflows are executed in parallel"
+ } > "${TIMESTAMP}-report.md"
+
+ - name: Upload report
+ uses: actions/upload-artifact@v4.6.2
+ with:
+ name: validation-report-${{ steps.generate-report.outputs.timestamp }}
+ path: ${{ steps.generate-report.outputs.timestamp }}-report.md
+
+ - name: Setup Git
+ shell: bash
+ run: |
+ git config --global user.name "GitHub Actions"
+ git config --global user.email "actions@github.com"
+
+ - name: Create/Update IT Branch
+ shell: bash
+ run: |
+ # Debug information
+ echo "Current directory: $(pwd)"
+ echo "Listing files in current directory:"
+ ls -la
+ echo "Listing files in workspace:"
+ ls -la $GITHUB_WORKSPACE
+
+ # Find the report file
+ REPORT_FILE=$(find $GITHUB_WORKSPACE -name "${{ steps.generate-report.outputs.timestamp }}-report.md")
+ echo "Found report file at: $REPORT_FILE"
+
+ if [ ! -f "$REPORT_FILE" ]; then
+ echo "Error: Report file not found!"
+ exit 1
+ fi
+
+ # Fetch all branches
+ git fetch origin
+
+ # Check if it branch exists remotely
+ if git ls-remote --exit-code --heads origin it; then
+ echo "it branch exists. Checking out..."
+ git checkout it
+ git pull origin it
+ else
+ echo "it branch does not exist. Creating new it branch from current HEAD..."
+ git checkout -b it
+ fi
+
+ # Use existing it-report directory if present, otherwise create it
+ if [ -d "it-report" ]; then
+ echo "it-report directory exists. Using existing directory."
+ else
+ echo "it-report directory does not exist. Creating it."
+ mkdir it-report
+ fi
+
+ # Copy the report to it-report directory
+ cp "$REPORT_FILE" it-report/
+
+ # Add and commit the report
+ git add it-report/${{ steps.generate-report.outputs.timestamp }}-report.md
+ git commit -m "Add validation report ${{ steps.generate-report.outputs.timestamp }}" || echo "Nothing to commit."
+
+ # Push to the it branch
+ git push origin it
+
+ - name: Output Report URL
+ id: output-urls
+ shell: bash
+ run: |
+ REPORT_URL="https://github.com/${{ github.repository }}/blob/it/it-report/${{ steps.generate-report.outputs.timestamp }}-report.md"
+ RAW_REPORT_URL="https://raw.githubusercontent.com/${{ github.repository }}/it/it-report/${{ steps.generate-report.outputs.timestamp }}-report.md"
+
+ echo "::notice::📊 Validation Report URL: $REPORT_URL"
+ echo "::notice::📊 Raw Report URL: $RAW_REPORT_URL"
+ echo "report_url=$REPORT_URL" >> $GITHUB_OUTPUT
+
+ - name: Check workflow results and fail if any failed
+ shell: bash
+ run: |
+ RESULTS='${{ steps.collect-results.outputs.results }}'
+
+ # Parse results and check for failures
+ failed_workflows=()
+ timeout_workflows=()
+ cancelled_workflows=()
+ other_failed_workflows=()
+ total=0
+ success=0
+ failure=0
+ timeout=0
+ cancelled=0
+ other_failed=0
+
+ for row in $(echo "$RESULTS" | jq -r '.[] | @base64'); do
+ _jq() {
+ echo ${row} | base64 --decode | jq -r "$1"
+ }
+ workflow=$(_jq '.workflow')
+ scenario_name=$(_jq '.scenarioName // empty')
+ scenario=$(_jq '.scenario | if type=="object" then to_entries | map("\(.key): \(.value)") | join(", ") else tostring end')
+ status=$(_jq '.status')
+ runUrl=$(_jq '.runUrl')
+
+ # Use scenario name if available, otherwise use the scenario details
+ display_scenario="${scenario_name:-${scenario}}"
+
+ # Count status and track failed workflows
+ total=$((total+1))
+ if [ "$status" = "success" ]; then
+ success=$((success+1))
+ elif [ "$status" = "failure" ]; then
+ failure=$((failure+1))
+ failed_workflows+=("${workflow} (${display_scenario}): ${runUrl}")
+ elif [ "$status" = "timeout" ]; then
+ timeout=$((timeout+1))
+ timeout_workflows+=("${workflow} (${display_scenario}): ${runUrl}")
+ elif [ "$status" = "cancelled" ]; then
+ cancelled=$((cancelled+1))
+ cancelled_workflows+=("${workflow} (${display_scenario}): ${runUrl}")
+ else
+ # Any other non-success status should be treated as a failure
+ other_failed=$((other_failed+1))
+ other_failed_workflows+=("${workflow} (${display_scenario}) [${status}]: ${runUrl}")
+ fi
+ done
+
+ # Display summary
+ echo "::notice::📊 Workflow Execution Summary:"
+ echo "::notice:: Total: ${total}, Success: ${success}, Failed: ${failure}, Timeout: ${timeout}, Cancelled: ${cancelled}, Other Failed: ${other_failed}"
+
+ # If there are failed workflows, display them and fail the IT
+ if [ ${#failed_workflows[@]} -gt 0 ]; then
+ echo "::error::❌ The following workflows failed:"
+ for failed in "${failed_workflows[@]}"; do
+ echo "::error:: - ${failed}"
+ done
+ fi
+
+ # If there are timeout workflows, display them and fail the IT
+ if [ ${#timeout_workflows[@]} -gt 0 ]; then
+ echo "::error::⏰ The following workflows timed out:"
+ for timeout in "${timeout_workflows[@]}"; do
+ echo "::error:: - ${timeout}"
+ done
+ fi
+
+ # If there are cancelled workflows, display them and fail the IT
+ if [ ${#cancelled_workflows[@]} -gt 0 ]; then
+ echo "::error::🚫 The following workflows were cancelled:"
+ for cancelled in "${cancelled_workflows[@]}"; do
+ echo "::error:: - ${cancelled}"
+ done
+ fi
+
+ # If there are other failed workflows, display them and fail the IT
+ if [ ${#other_failed_workflows[@]} -gt 0 ]; then
+ echo "::error::❌ The following workflows failed with other statuses:"
+ for other_failed in "${other_failed_workflows[@]}"; do
+ echo "::error:: - ${other_failed}"
+ done
+ fi
+
+ # Fail the IT workflow if any workflow failed, timed out, was cancelled, or had other failure statuses
+ if [ ${#failed_workflows[@]} -gt 0 ] || [ ${#timeout_workflows[@]} -gt 0 ] || [ ${#cancelled_workflows[@]} -gt 0 ] || [ ${#other_failed_workflows[@]} -gt 0 ]; then
+ echo "::error::❌ IT workflow failed because ${#failed_workflows[@]} workflow(s) failed, ${#timeout_workflows[@]} workflow(s) timed out, ${#cancelled_workflows[@]} workflow(s) were cancelled, and ${#other_failed_workflows[@]} workflow(s) had other failure statuses."
+ exit 1
+ fi
+
+ echo "::notice::✅ All workflows completed successfully!"
\ No newline at end of file
diff --git a/.github/actions/setupmaven/action.yml b/.github/actions/setupmaven/action.yml
new file mode 100644
index 000000000..50f50652e
--- /dev/null
+++ b/.github/actions/setupmaven/action.yml
@@ -0,0 +1,26 @@
+name: Set Up Maven
+description: Set up Maven with github token.
+inputs:
+ token:
+ description: "GitHub token"
+ required: true
+runs:
+ using: "composite"
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Apache Maven and JDK
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'microsoft'
+ java-version: 21
+ server-id: github # Value of the distributionManagement/repository/id field of the pom.xml
+ server-username: MAVEN_USERNAME # env variable for username
+ server-password: MAVEN_TOKEN # env variable for token
+ - name: Set Maven env
+ env:
+ MAVEN_USERNAME: github
+ MAVEN_TOKEN: ${{ inputs.token }}
+ shell: bash
+ run: |
+ echo "MAVEN_USERNAME=${MAVEN_USERNAME}" >> $GITHUB_ENV
+ echo "MAVEN_TOKEN=${MAVEN_TOKEN}" >> $GITHUB_ENV
\ No newline at end of file
diff --git a/.github/actions/setvars/action.yml b/.github/actions/setvars/action.yml
new file mode 100644
index 000000000..a9991b1ec
--- /dev/null
+++ b/.github/actions/setvars/action.yml
@@ -0,0 +1,13 @@
+name: "Set environment variables"
+description: "Configures environment variables for a workflow"
+inputs:
+ varFilePath:
+ description: "File path to variable file or directory. Defaults to ./.github/variables/* if none specified and runs against each file in that directory."
+ required: false
+ default: ./.github/variables/*
+runs:
+ using: "composite"
+ steps:
+ - run: |
+ sed "" ${{ inputs.varFilePath }} >> $GITHUB_ENV
+ shell: bash
diff --git a/.github/docs/check-arm-vm-size.md b/.github/docs/check-arm-vm-size.md
new file mode 100644
index 000000000..dd318afb5
--- /dev/null
+++ b/.github/docs/check-arm-vm-size.md
@@ -0,0 +1,73 @@
+## GitHub Action: Check ARM VM Size Changes
+
+### Overview
+This GitHub Action runs on a schedule to check for changes in Azure ARM VM sizes and creates a pull request to update configurations if changes are detected.
+
+The action will compare the latest ARM VM sizes queried using AZ CLI with those listed in the variable azure.armBased.vmSize.list within oracle/weblogic-azure/resources/azure-common.properties. If changes are detected, it will initiate a pull request to the main branch of the current repository that runs the action.
+
+### Schedule
+- **Frequency:** Every 14 days (2 weeks)
+- **Schedule Expression:** `0 0 */14 * *` (Runs at midnight (00:00) UTC)
+
+The schedule event only happens in [azure-javaee/weblogic-azure](https://github.com/azure-javaee/weblogic-azure).
+
+If you want to run the action in your repository, you have to trigger it from Web Browser.
+
+### Environment Variables
+- **azureCredentials:** Secret for Azure credentials
+- **repoName:** Repository name set to "weblogic-azure"
+- **userEmail:** Secret for user email of GitHub account to access GitHub repository
+- **userName:** Secret for user name of GitHub account
+
+### Jobs
+#### check-vm-sizes
+- **Runs on:** `ubuntu-latest`
+- **Steps:**
+ 1. **Checkout repository:** Checks out the repository using `actions/checkout@v2`.
+
+ 2. **Azure Login:** Logs into Azure using `azure/login@v1`.
+
+ 3. **Check for VM size changes:**
+ - Reads from `resources/azure-common.properties`.
+ - Extracts and compares current VM sizes with the latest available.
+ - Determines if there are changes and prepares data for output.
+
+ 4. **Create PR if changes detected:**
+ - Conditionally creates a pull request if changes in ARM VM sizes are detected.
+ - Updates the ARM VM sizes configuration in `resources/azure-common.properties`.
+ - Commits changes to a new branch and pushes to origin.
+ - Creates a pull request with a title and description based on detected changes.
+
+### Run the action
+
+You can use `.github/resource/azure-credential-setup-wls-vm.sh` to create GitHub Action Secret for the pipeline.
+
+1. Fill in `.github/resource/credentials-params-wls-vm.yaml` with your values.
+
+ | Variable Name | Value |
+ |----------------|----------------------|
+ | OTN_USERID | Oracle single sign-on userid. If you don't have one, sign up from [Create Your Oracle Account](https://profile.oracle.com/myprofile/account/create-account.jspx?nexturl=https%3A%2F%2Fsupport.oracle.com&pid=mos) |
+ | OTN_PASSWORD | Password for Oracle single sign-on userid. |
+ | WLS_PSW | Password for WebLogic Server. |
+ | USER_EMAIL | User email of GitHub account to access GitHub repository. |
+ | USER_NAME | User name of GitHub account. |
+ | GIT_TOKEN | GitHub token to access GitHub repository.
+Make sure the token has permissions:
+- Read and write of Pull requests.
+- Read and write of Contents. |
+
+2. Set up secret
+
+ Run `azure-credential-setup-wls-vm.sh` to set up secret.
+
+ ```shell
+ bash .github/resource/azure-credential-setup-wls-vm.sh
+ ```
+
+ Follow the output to set up secrets.
+
+3. Trigger the workflow
+
+ - Fork this repo from [azure-javaee/weblogic-azure](https://github.com/azure-javaee/weblogic-azure).
+
+ - Enable workflow in the fork. Select **Actions**, then follow the instructions to enable workflow.
+
+ - Select **Actions** -> **Check ARM VM Size Changes** -> **Run workflow** to run the workflow.
+
diff --git a/.github/it/README.md b/.github/it/README.md
new file mode 100644
index 000000000..268a55c15
--- /dev/null
+++ b/.github/it/README.md
@@ -0,0 +1,286 @@
+# IT Validation Configuration
+
+## Overview
+
+The IT validation system is a comprehensive integration testing framework designed to validate Oracle WebLogic Server deployments on Azure across multiple scenarios and configurations. It automates the execution of various deployment scenarios, monitors their progress, and generates detailed reports to ensure the reliability and quality of the Azure WebLogic templates.
+
+### Key Features
+
+- **Multi-Scenario Testing**: Execute multiple test scenarios simultaneously or sequentially
+- **Flexible Execution Modes**: Support for both parallel and serial execution modes
+- **Comprehensive Reporting**: Detailed reports with success/failure statistics and execution URLs
+- **Automated Monitoring**: Real-time tracking of workflow execution with timeout protection
+- **Resource Management**: Efficient cleanup and resource optimization for cost-effective testing
+
+### Use Cases
+
+- **Regression Testing**: Validate WebLogic templates after code changes or updates
+- **Release Validation**: Comprehensive testing before production releases
+- **Configuration Testing**: Verify different deployment configurations and parameters
+- **Performance Monitoring**: Track deployment times and resource utilization
+
+## Table of Contents
+
+- [System Architecture](#system-architecture)
+- [Configuration Structure](#configuration-structure)
+ - [Scenarios Structure](#scenarios-structure)
+ - [Execution Modes](#execution-modes)
+- [How It Works](#how-it-works)
+- [Available Files](#available-files)
+ - [File Content Overview](#file-content-overview)
+- [Getting Started](#getting-started)
+ - [Quick Start Guide](#quick-start-guide)
+ - [Prerequisites](#prerequisites)
+- [IT Action Usage](#it-action-usage)
+ - [Action Inputs](#action-inputs)
+ - [Action Outputs](#action-outputs)
+- [Structure Requirements](#structure-requirements)
+- [Serial vs Parallel Execution](#serial-vs-parallel-execution)
+- [Report Generation](#report-generation)
+ - [Status Tracking](#status-tracking)
+ - [Accessing Reports](#accessing-reports)
+- [Error Handling](#error-handling)
+- [Trouble Shooting](#trouble-shooting)
+
+
+## System Architecture
+
+The IT validation system consists of:
+
+1. **Validation Plan Files** (this directory): JSON files defining what to test
+2. **IT Action** (`/.github/actions/it/action.yml`): Reusable composite action that executes the plans
+3. **IT Workflows** (`/.github/workflows/it-validation-*.yaml`): Workflows that trigger the action with specific plans
+4. **Target Workflows** (`/.github/workflows/testWls*.yml` and `buildWls*.yml`): The actual validation workflows that get executed
+
+## Configuration Structure
+
+The validation plan files use the following structure:
+
+### Scenarios Structure
+Each validation plan defines scenarios with descriptive names:
+
+```json
+{
+ "validation_scenarios": [
+ {
+ "workflow": "testWlsVmAdmin.yml",
+ "run_mode": "serial",
+ "scenarios": [
+ {
+ "scenario": "Test Admin Server on VM with mssqlserver",
+ "inputs": {
+ "location": "centralus"
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Execution Modes
+
+You can control how scenarios within a workflow are executed by using the optional `run_mode` property:
+
+- **`"run_mode": "serial"`**: Scenarios are executed one after another. Each scenario must complete before the next one starts.
+- **`"run_mode": "parallel"`** or **no `run_mode` specified**: Scenarios are executed simultaneously (default behavior).
+
+**Example with serial execution:**
+```json
+{
+ "validation_scenarios": [
+ {
+ "workflow": "testWlsVmCluster.yml",
+ "run_mode": "serial",
+ "scenarios": [
+ {
+ "scenario": "First scenario",
+ "inputs": { /* ... */ }
+ },
+ {
+ "scenario": "Second scenario",
+ "inputs": { /* ... */ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+**When to use serial mode:**
+- Resource-intensive scenarios that might conflict if run simultaneously
+- Scenarios that need to run in a specific order
+- Limited resource environments where parallel execution might cause failures
+
+## How It Works
+
+1. **IT Workflows**: The `it-validation-*.yaml` workflows are triggered (manually or scheduled)
+
+2. **Plan File Mapping**: Each IT workflow maps its input to a specific validation plan file in this directory
+
+3. **Action Execution**: The workflow calls the IT action (`/.github/actions/it/action.yml`) with the plan file path
+
+4. **Plan Processing**: The action reads the validation plan and processes each scenario
+
+5. **Execution Mode**: The optional `run_mode` property controls whether scenarios are executed serially or in parallel
+
+6. **Workflow Triggering**: The action triggers the specified target workflows with the scenario inputs
+
+7. **Monitoring**: The action monitors workflow execution and waits for completion
+
+8. **Reporting**: Results are compiled into comprehensive reports and stored in the `it` branch
+
+## Available Files
+
+- `validation-plan-aks.json`: Azure Kubernetes Service (AKS) validation scenarios for WebLogic Server deployments
+- `validation-plan-build.json`: Build-only validation scenarios for template compilation and syntax checking
+- `validation-plan-vm-admin.json`: WebLogic Admin Server on VM validation scenarios
+- `validation-plan-vm-cluster.json`: WebLogic Configured Cluster on VM validation scenarios
+- `validation-plan-vm-dynamic-cluster.json`: WebLogic Dynamic Cluster on VM validation scenarios
+
+### File Content Overview
+
+Each validation plan targets specific WebLogic deployment scenarios:
+
+- **AKS Plans**: Test WebLogic Server deployments on Azure Kubernetes Service with various database configurations
+- **VM Plans**: Test WebLogic Server deployments on Azure Virtual Machines in different topologies (Admin Server, Configured Cluster, Dynamic Cluster)
+- **Build Plans**: Validate artifact compilation and template syntax without actual deployments
+
+## Getting Started
+
+### Quick Start Guide
+
+1. **Choose a Validation Plan**: Select the appropriate validation plan file based on your testing needs:
+ - For AKS deployments: `validation-plan-aks.json`
+ - For Admin Server on VM: `validation-plan-vm-admin.json`
+ - For Configured Cluster on VM: `validation-plan-vm-cluster.json`
+ - For Dynamic Cluster on VM: `validation-plan-vm-dynamic-cluster.json`
+ - For build validation only: `validation-plan-build.json`
+
+2. **Trigger IT Validation**: Use the GitHub Actions interface to manually trigger an IT validation workflow:
+ - Go to the "Actions" tab in the repository
+ - Select the appropriate `it-validation-*` workflow:
+ - `IT Validation for AKS` - for AKS deployments
+ - `IT Validation for VM Admin` - for Admin Server on VM
+ - `IT Validation for VM Cluster` - for Configured Cluster on VM
+ - `IT Validation for VM Dynamic Cluster` - for Dynamic Cluster on VM
+ - `IT Validation for Build` - for build-only validation
+ - Click "Run workflow" and select your desired validation plan
+
+3. **Monitor Progress**: Track the execution progress in the Actions tab and view real-time logs
+
+4. **Review Results**: Check the generated reports in the `it` branch under `it-report/` directory
+
+### Prerequisites
+
+Before using the IT validation system, ensure:
+
+- [ ] Azure subscription with appropriate permissions
+- [ ] GitHub repository with Actions enabled
+- [ ] Required secrets configured in repository settings. The repository secrets set by the [setup-credentials.sh](../workflows/setup-credentials.sh) script must be set with current and valid values before any of these workflows will run.
+- [ ] Access to the `it` branch for report storage
+
+## IT Action Usage
+
+The validation plans are consumed by the IT action located at `/.github/actions/it/action.yml`.
+
+### Action Inputs
+
+| Input | Description | Required |
+|-------|-------------|----------|
+| `it_file` | Path to the validation plan file | Yes |
+
+### Action Outputs
+
+| Output | Description |
+|--------|-------------|
+| `results` | JSON string containing the results of all workflow executions |
+| `report_timestamp` | Timestamp of the generated report |
+| `report_url` | URL to the generated report on the IT branch |
+
+## Structure Requirements
+
+- Each plan must have a `validation_scenarios` array
+- Each item in the array must have a `workflow` and `scenarios` field
+- Each scenario must have a `scenario` name and an `inputs` object
+- The optional `run_mode` field can be set to `"serial"` or `"parallel"` (default)
+- Only the `inputs` object content is passed to the target workflow
+
+## Serial vs Parallel Execution
+
+### Parallel Execution (Default)
+- All scenarios within a workflow are triggered simultaneously
+- Faster overall execution time
+- Suitable for independent scenarios that don't compete for resources
+
+### Serial Execution
+- Scenarios are executed one after another
+- Each scenario must complete before the next one starts
+- Longer overall execution time but better resource management
+- Includes waiting and monitoring between scenarios
+- Recommended for resource-intensive workloads or debugging
+
+## Report Generation
+
+The IT action generates comprehensive reports that include:
+
+- **Summary Statistics**: Total workflows, success/failure counts including cancelled and timeout scenarios
+- **Detailed Results**: Individual workflow results with duration and status
+- **Execution URLs**: Direct links to workflow runs
+- **Execution Notes**: Information about serial vs parallel execution
+
+Reports are:
+1. Uploaded as GitHub Actions artifacts
+2. Committed to the `it` branch in the `it-report/` directory
+3. Accessible via the repository's IT branch
+
+### Status Tracking
+
+The system tracks all execution outcomes:
+- **Success**: Workflows completed successfully
+- **Failure**: Workflows failed during execution
+- **Timeout**: Workflows exceeded the 60-minute timeout limit
+- **Cancelled**: Workflows manually cancelled by users
+- **Other Failed**: Workflows with any other non-success status
+
+### Accessing Reports
+
+Reports can be accessed in multiple ways:
+
+1. **GitHub Actions Artifacts**: Download reports directly from the workflow run artifacts
+2. **IT Branch**: Browse reports in the `it` branch under `it-report/` directory
+3. **Direct Links**: Use the `report_url` output from the IT action
+4. **API Access**: Programmatically access reports via GitHub API
+
+#### Report File Naming Convention
+
+Reports follow the naming pattern: `YYYY-MM-DD-HH-MM-SS-report.md`
+
+Example: `2025-08-04-10-30-00-report.md` (August 4, 2025 at 10:30:00 UTC)
+
+## Error Handling
+
+The IT action includes robust error handling:
+- **Timeout Protection**: 60-minute maximum wait time per workflow
+- **Failure Detection**: IT workflow fails if any triggered workflow fails, times out, or is cancelled
+
+## Trouble Shooting
+
+### Debugging with tmate
+
+One of the biggest pain points to develop GitHub actions for our Java EE solution offerings is that it's hard to debug them by direct interacting with the host system on which the actual Actions are running.
+
+I found a GitHub action `tmate` which unlocks the door for debugging GitHub actions using SSH or Web shell, pls refer to [Debugging with tmate](https://github.com/marketplace/actions/debugging-with-tmate) or to the [tmate docs](https://mxschmitt.github.io/action-tmate/) for detailed how-to instructions.
+
+And here is an example where `tmate` was applied in `integration-test` workflow of `liberty-on-aks` repo:
+* https://github.com/WASdev/azure.liberty.aks/pull/62/files#diff-b6766eb8febc0c51651250cd0cdfb44c4f0d3256470d88e62bf82fd46aa73ae0R119-R121
+
+
+## Authentication of the tmate session.
+> Refer to this [issue](https://github.com/mxschmitt/action-tmate/issues/163)’s [comment](https://github.com/mxschmitt/action-tmate/issues/163#issuecomment-1651436411),
+
+this [action](https://github.com/mxschmitt/action-tmate) uses the ssh public key from the github account as `authorised_keys`.
+So if you have multiple private keys in your local machine, you may need to specify the private key used for `*.tmate.io` in your `~/.ssh/config` file.
+
+
+
diff --git a/.github/it/tmate-sshkey.png b/.github/it/tmate-sshkey.png
new file mode 100644
index 000000000..7e4e383f6
Binary files /dev/null and b/.github/it/tmate-sshkey.png differ
diff --git a/.github/it/validation-plan-aks.json b/.github/it/validation-plan-aks.json
new file mode 100644
index 000000000..d59d08a1a
--- /dev/null
+++ b/.github/it/validation-plan-aks.json
@@ -0,0 +1,36 @@
+{
+ "validation_scenarios": [
+ {
+ "workflow": "testWlsAksWithDependencyCreation.yml",
+ "run_mode": "serial",
+ "scenarios": [
+ {
+ "scenario": "Deploy with passwordless postgresql",
+ "inputs": {
+ "location": "centralus",
+ "databaseType": "postgresql-passwordless(flexible)"
+ }
+ },
+ {
+ "scenario": "Disable the App Gateway Ingress Controller",
+ "inputs": {
+ "location": "centralus",
+ "configurations_for_it": {
+ "enableAppGWIngress": "false"
+ }
+ }
+ },
+ {
+ "scenario": "Bring you own AKS clusters and using postgresql",
+ "inputs": {
+ "location": "centralus",
+ "configurations_for_it": {
+ "createAKSCluster": "false",
+ "aksClusterName": "my-existing-aks-cluster"
+ }
+ }
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/it/validation-plan-build.json b/.github/it/validation-plan-build.json
new file mode 100644
index 000000000..50ff53c38
--- /dev/null
+++ b/.github/it/validation-plan-build.json
@@ -0,0 +1,55 @@
+{
+ "validation_scenarios": [
+ {
+ "workflow": "buildWlsAksArtifact.yml",
+ "scenarios": [
+ {
+ "scenario": "Build WLS on AKS artifact",
+ "inputs": {
+ }
+
+ }
+ ]
+ },
+ {
+ "workflow": "buildWlsVm4AsArtifact.yml",
+ "scenarios": [
+ {
+ "scenario": "Build Admin Server VM artifact",
+ "inputs": {
+ }
+ }
+ ]
+ },
+ {
+ "workflow": "buildWlsVm4CcArtifact.yml",
+ "scenarios": [
+ {
+ "scenario": "Build Configured Cluster VM artifact",
+ "inputs": {
+ }
+ }
+ ]
+ },
+ {
+ "workflow": "buildWlsVm4DcArtifact.yml",
+ "scenarios": [
+ {
+ "scenario": "Build Dynamic Cluster VM artifact",
+ "inputs": {
+ }
+ }
+ ]
+ },
+ {
+ "workflow": "buildWlsVm4SnArtifact.yml",
+ "scenarios": [
+ {
+ "scenario": "Build Single Node VM artifact",
+ "inputs": {
+ }
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/it/validation-plan-vm-admin.json b/.github/it/validation-plan-vm-admin.json
new file mode 100644
index 000000000..26090ed7a
--- /dev/null
+++ b/.github/it/validation-plan-vm-admin.json
@@ -0,0 +1,33 @@
+{
+ "validation_scenarios": [
+ {
+ "workflow": "testWlsVmAdmin.yml",
+ "scenarios": [
+ {
+ "scenario": "Test Admin Server on VM with mssqlserver",
+ "inputs": {
+ "location": "centralus"
+ }
+ },
+ {
+ "scenario": "Test Admin Server on VM with passwordless postgresql",
+ "inputs": {
+ "location": "centralus",
+ "databaseType": "postgresql-passwordless(flexible)"
+ }
+ },
+ {
+ "scenario": "Bring your own VNET for Admin Server on VM",
+ "inputs": {
+ "location": "centralus",
+ "configurations_for_it": {
+ "virtualNetworkNewOrExisting": "existing",
+ "virtualNetworkName": "myvirtualNetworkName-vm-admin",
+ "subnetName": "mySubnet-vm-admin"
+ }
+ }
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/it/validation-plan-vm-cluster.json b/.github/it/validation-plan-vm-cluster.json
new file mode 100644
index 000000000..fbe0f006a
--- /dev/null
+++ b/.github/it/validation-plan-vm-cluster.json
@@ -0,0 +1,37 @@
+{
+ "validation_scenarios": [
+ {
+ "workflow": "testWlsVmCluster.yml",
+ "run_mode": "serial",
+ "scenarios": [
+ {
+ "scenario": "Test Configured Cluster on VM with mssqlserver",
+ "inputs": {
+ "location": "centralus",
+ "databaseType": "mssqlserver"
+ }
+ },
+ {
+ "scenario": "Bring your own VNET for Cluster on VM",
+ "inputs": {
+ "location": "centralus",
+ "databaseType": "mssqlserver",
+ "configurations_for_it": {
+ "virtualNetworkNewOrExisting": "existing",
+ "virtualNetworkName": "my-existing-cluster-vnet",
+ "subnetForCluster": "my-existing-cluster-subnet-for-cluster",
+ "subnetForAppGateway": "my-existing-cluster-subnet-for-app-gateway"
+ }
+ }
+ },
+ {
+ "scenario": "Test Configured Cluster on VM with PostgreSQL passwordless",
+ "inputs": {
+ "location": "centralus",
+ "databaseType": "postgresql-passwordless(flexible)"
+ }
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/it/validation-plan-vm-dynamic-cluster.json b/.github/it/validation-plan-vm-dynamic-cluster.json
new file mode 100644
index 000000000..f7266db3a
--- /dev/null
+++ b/.github/it/validation-plan-vm-dynamic-cluster.json
@@ -0,0 +1,24 @@
+{
+ "validation_scenarios": [
+ {
+ "workflow": "testWlsVmDynamicCluster.yml",
+ "run_mode": "serial",
+ "scenarios": [
+ {
+ "scenario": "Test Dynamic Cluster on VM with mssqlserver",
+ "inputs": {
+ "location": "centralus",
+ "databaseType": "mssqlserver"
+ }
+ },
+ {
+ "scenario": "Test Dynamic Cluster on VM with PostgreSQL passwordless",
+ "inputs": {
+ "location": "centralus",
+ "databaseType": "postgresql-passwordless(flexible)"
+ }
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/resource/azure-credential-setup-wls-aks.sh b/.github/resource/azure-credential-setup-wls-aks.sh
new file mode 100644
index 000000000..31b8af155
--- /dev/null
+++ b/.github/resource/azure-credential-setup-wls-aks.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -Eeuo pipefail
+
+echo "Execute azure-credential-setup.sh - Start------------------------------------------"
+
+## Create Azure Credentials
+SERVICE_PRINCIPAL_NAME_WLS_AKS="sp-${REPO_NAME}-wls-aks-$(date +%s)"
+echo "Creating Azure Service Principal with name: $SERVICE_PRINCIPAL_NAME_WLS_AKS"
+SUBSCRIPTION_ID=$(az account show --query id -o tsv| tr -d '\r\n')
+
+AZURE_CREDENTIALS=$(az ad sp create-for-rbac --name ${SERVICE_PRINCIPAL_NAME_WLS_AKS} --role="Contributor" --scopes="/subscriptions/${SUBSCRIPTION_ID}" --sdk-auth --only-show-errors)
+SP_ID=$( az ad sp list --display-name $SERVICE_PRINCIPAL_NAME_WLS_AKS --query \[0\].id -o tsv | tr -d '\r\n')
+az role assignment create --assignee ${SP_ID} --scope="/subscriptions/${SUBSCRIPTION_ID}" --role "User Access Administrator"
+
+## Set the Azure Credentials as a secret in the repository
+gh secret --repo $(gh repo set-default --view) set "AZURE_CREDENTIALS" -b"${AZURE_CREDENTIALS}"
+gh variable --repo $(gh repo set-default --view) set "SERVICE_PRINCIPAL_NAME_WLS_AKS" -b"${SERVICE_PRINCIPAL_NAME_WLS_AKS}"
+
+echo "Execute azure-credential-setup.sh - End--------------------------------------------"
diff --git a/.github/resource/azure-credential-setup-wls-vm.sh b/.github/resource/azure-credential-setup-wls-vm.sh
new file mode 100644
index 000000000..9d57cffb9
--- /dev/null
+++ b/.github/resource/azure-credential-setup-wls-vm.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+set -Eeuo pipefail
+
+echo "Execute azure-credential-setup.sh - Start------------------------------------------"
+
+## Create Azure Credentials
+SERVICE_PRINCIPAL_NAME_WLS_VM="sp-${REPO_NAME}-$(date +%s)"
+echo "Creating Azure Service Principal with name: $SERVICE_PRINCIPAL_NAME_WLS_VM"
+SUBSCRIPTION_ID=$(az account show --query id -o tsv| tr -d '\r\n')
+
+SERVICE_PRINCIPAL=$(az ad sp create-for-rbac --name ${SERVICE_PRINCIPAL_NAME_WLS_VM} --role="Contributor" --scopes="/subscriptions/${SUBSCRIPTION_ID}" --sdk-auth --only-show-errors | base64 -w0)
+AZURE_CREDENTIALS=$(echo $SERVICE_PRINCIPAL | base64 -d)
+
+## Set the Azure Credentials as a secret in the repository
+gh secret --repo $(gh repo set-default --view) set "AZURE_CREDENTIALS" -b"${AZURE_CREDENTIALS}"
+gh variable --repo $(gh repo set-default --view) set "SERVICE_PRINCIPAL_NAME_WLS_VM" -b"${SERVICE_PRINCIPAL_NAME_WLS_VM}"
+
+echo "Execute azure-credential-setup.sh - End--------------------------------------------"
diff --git a/.github/resource/azure-credential-teardown-wls-aks.sh b/.github/resource/azure-credential-teardown-wls-aks.sh
new file mode 100644
index 000000000..8e85df681
--- /dev/null
+++ b/.github/resource/azure-credential-teardown-wls-aks.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+set -Eeuo pipefail
+
+echo "Execute azure-credential-teardown.sh - Start------------------------------------------"
+
+gh secret --repo $(gh repo set-default --view) delete "AZURE_CREDENTIALS"
+SERVICE_PRINCIPAL_NAME_WLS_AKS=$(gh variable --repo $(gh repo set-default --view) get "SERVICE_PRINCIPAL_NAME_WLS_AKS")
+az ad sp delete --id $(az ad sp list --display-name $SERVICE_PRINCIPAL_NAME_WLS_AKS --query "[0].appId" -o tsv| tr -d '\r\n')
+
+echo "Execute azure-credential-teardown.sh - End--------------------------------------------"
diff --git a/.github/resource/azure-credential-teardown-wls-vm.sh b/.github/resource/azure-credential-teardown-wls-vm.sh
new file mode 100644
index 000000000..520873f85
--- /dev/null
+++ b/.github/resource/azure-credential-teardown-wls-vm.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+set -Eeuo pipefail
+
+echo "Execute azure-credential-teardown.sh - Start------------------------------------------"
+
+gh secret --repo $(gh repo set-default --view) delete "AZURE_CREDENTIALS"
+SERVICE_PRINCIPAL_NAME_WLS_VM=$(gh variable --repo $(gh repo set-default --view) get "SERVICE_PRINCIPAL_NAME_WLS_VM")
+az ad sp delete --id $(az ad sp list --display-name $SERVICE_PRINCIPAL_NAME_WLS_VM --query "[0].appId" -o tsv| tr -d '\r\n')
+
+echo "Execute azure-credential-teardown.sh - End--------------------------------------------"
diff --git a/.github/resource/credentials-params-setup.sh b/.github/resource/credentials-params-setup.sh
new file mode 100644
index 000000000..b30bd6e11
--- /dev/null
+++ b/.github/resource/credentials-params-setup.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+set -Eeuo pipefail
+
+# ANSI color codes
+RED='\033[0;31m'
+NC='\033[0m' # No Color
+
+echo "setup-credentials.sh - Start"
+
+# Function to print error messages in red
+print_error() {
+ local message=$1
+ echo -e "${RED}Error: ${message}${NC}"
+}
+
+check_parameters() {
+ echo "Checking parameters..."
+ local has_empty_value=0
+
+ while IFS= read -r line; do
+ name=$(echo "$line" | jq -r '.name')
+ value=$(echo "$line" | jq -r '.value')
+
+ if [ -z "$value" ] || [ "$value" == "null" ]; then
+ print_error "The parameter '$name' has an empty/null value. Please provide a valid value."
+ has_empty_value=1
+ break
+ else
+ echo "Name: $name, Value: $value"
+ fi
+ done < <(yq eval -o=json '.[]' "$param_file" | jq -c '.')
+
+ echo "return $has_empty_value"
+ return $has_empty_value
+}
+
+# Function to set values from YAML
+set_values() {
+ echo "Setting values..."
+ yq eval -o=json '.[]' "$param_file" | jq -c '.' | while read -r line; do
+ name=$(echo "$line" | jq -r '.name')
+ value=$(echo "$line" | jq -r '.value')
+ gh secret --repo $(gh repo set-default --view) set "$name" -b"${value}"
+ done
+}
+
+# Main script execution
+main() {
+ if check_parameters; then
+ echo "All parameters are valid."
+ set_values
+ else
+ echo "Parameter check failed. Exiting."
+ exit 1
+ fi
+
+ echo "setup-credentials.sh - Finish"
+}
+
+# Run the main function
+main
diff --git a/.github/resource/credentials-params-teardown.sh b/.github/resource/credentials-params-teardown.sh
new file mode 100644
index 000000000..a014c4761
--- /dev/null
+++ b/.github/resource/credentials-params-teardown.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+set -Eeuo pipefail
+
+echo "teardown-credentials.sh - Start"
+
+# remove param the json
+yq eval -o=json '.[]' "$param_file" | jq -c '.' | while read -r line; do
+ name=$(echo "$line" | jq -r '.name')
+ value=$(echo "$line" | jq -r '.value')
+ gh secret --repo $(gh repo set-default --view) delete "$name"
+done
+
+echo "teardown-credentials.sh - Finish"
diff --git a/.github/resource/credentials-params-wls-aks.yaml b/.github/resource/credentials-params-wls-aks.yaml
new file mode 100644
index 000000000..c5c5784e8
--- /dev/null
+++ b/.github/resource/credentials-params-wls-aks.yaml
@@ -0,0 +1,23 @@
+# This file contains the parameters for the credentials used in the workflows.
+- name: ORC_SSOUSER
+ value: ""
+ description: "Oracle single sign-on userid."
+- name: ORC_SSOPSW
+ value: ""
+ description: "Password for Oracle single sign-on userid."
+- name: WDT_RUNTIMEPSW
+ value: ""
+ description: "Password for WebLogic Server and Runtime Deployment Tooling encryption."
+- name: WLS_PSW
+ value: ${WDT_RUNTIMEPSW}
+ description: "Password for WebLogic Server and Runtime Deployment Tooling encryption."
+# parameters for the credentials used in the workflows with default values.
+- name: WLS_USERNAME
+ value: "weblogic"
+ description: "WebLogic Server user name."
+- name: DB_PASSWORD
+ value: "Secret123!"
+ description: "Password for the database"
+- name: LOCATION
+ value: "eastus"
+ description: "Location of the resource group"
diff --git a/.github/resource/credentials-params-wls-vm.yaml b/.github/resource/credentials-params-wls-vm.yaml
new file mode 100644
index 000000000..33dc568f8
--- /dev/null
+++ b/.github/resource/credentials-params-wls-vm.yaml
@@ -0,0 +1,35 @@
+# This file contains the parameters for the credentials used in the workflows.
+- name: OTN_USERID
+ value: ""
+ description: Oracle single sign-on userid.
+- name: OTN_PASSWORD
+ value: ""
+ description: Password for Oracle single sign-on userid.
+- name: WLS_PSW
+ value: ""
+ description: Password for WebLogic Server.
+# Git credentials
+- name: USER_EMAIL
+ value: ""
+ description: User Email of GitHub account to access GitHub repository.
+- name: USER_NAME
+ value: ""
+ description: User name of GitHub account
+- name: GIT_TOKEN
+ value: ""
+ description: GitHub token to access GitHub repository.
+# parameters for the credentials used in the workflows with default values.
+- name: LOCATION
+ value: "eastus"
+ description: Location of the resource group
+# Optional parameters:
+# if you want to use optional parameters, please uncomment the following lines
+#- name: ELK_URI
+# value: ""
+# description: URI (hostname:port) for Elastic server, leave blank if you don't want to integrate ELK.
+#- name: ELK_USER_NAME
+# value: ""
+# description: Account user name for Elastic server, leave blank if you don't want to integrate ELK.
+#- name: ELK_PSW
+# value: ""
+# description: Account password for Elastic server, leave blank if you don't want to integrate ELK.
diff --git a/.github/resource/pre-check.sh b/.github/resource/pre-check.sh
new file mode 100644
index 000000000..533c230db
--- /dev/null
+++ b/.github/resource/pre-check.sh
@@ -0,0 +1,68 @@
+# Check environment and tools required to run the script
+
+# ANSI color codes
+GREEN='\033[0;32m'
+NC='\033[0m' # No Color
+
+## Check if the required tools are installed and logged in
+echo -e "${GREEN}To run this script, you need to have the following tools installed:${NC}"
+echo -e "${GREEN}1. yq${NC}"
+echo -e "${GREEN}2. Github CLI (gh)${NC}"
+echo -e "${GREEN}3. Azure CLI (az)${NC}"
+echo -e "${GREEN}And you need to be logged in to GitHub CLI (gh), and Azure CLI (az).${NC}"
+
+echo "Checking if the required tools are installed..."
+echo "Checking progress started..."
+
+if ! command -v yq &> /dev/null; then
+ echo "Check required tools and environment failed."
+ echo "yq is not installed. Please install it to proceed."
+ exit 1
+fi
+echo "1/6...yq is installed."
+
+if ! command -v jq &> /dev/null; then
+ echo "Check required tools and environment failed."
+ echo "jq is not installed. Please install it to proceed."
+ exit 1
+fi
+echo "2/6...jq is installed."
+
+# Check gh installed
+if ! command -v gh &> /dev/null; then
+ echo "Check required tools and environment failed."
+ echo "GitHub CLI (gh) is not installed. Please install it to proceed."
+ exit 1
+fi
+echo "3/6...GitHub CLI (gh) is installed."
+
+
+# Check if the GitHub CLI (gh) is logged in
+if ! gh auth status &> /dev/null; then
+ echo "Check required tools and environment failed."
+ echo "You are not logged in to GitHub CLI (gh). Please log in with 'gh auth login' to proceed."
+ exit 1
+fi
+echo "4/6...You are logged in to GitHub CLI (gh)."
+
+# check if az is installed
+if ! command -v az &> /dev/null; then
+ echo "Check required tools and environment failed."
+ echo "Azure CLI (az) is not installed. Please install it to proceed."
+ exit 1
+fi
+echo "5/6...Azure CLI (az) is installed."
+
+
+# check if az is logged in
+if ! az account show &> /dev/null; then
+ echo "Check required tools and environment failed."
+ echo "You are not logged in to Azure CLI (az). Please log in with command 'az login' to proceed."
+ exit 1
+fi
+echo "6/6...You are logged in to Azure CLI (az)."
+
+echo "Checking progress completed..."
+
+echo "Select default repository for this project"
+gh repo set-default
diff --git a/.github/variables/vm-dependencies.env b/.github/variables/vm-dependencies.env
new file mode 100644
index 000000000..3e181d08a
--- /dev/null
+++ b/.github/variables/vm-dependencies.env
@@ -0,0 +1,2 @@
+refArmttk=6b75cb7a3f65234995a2019fcae20a9b2c2d8635
+azCliVersion=2.72.0
diff --git a/.github/workflows/buildWlsAksArtifact.yml b/.github/workflows/buildWlsAksArtifact.yml
new file mode 100644
index 000000000..d40dfdb2b
--- /dev/null
+++ b/.github/workflows/buildWlsAksArtifact.yml
@@ -0,0 +1,59 @@
+name: Build WLS on AKS artifact
+
+on:
+ workflow_dispatch:
+ repository_dispatch:
+ types: [aks-package]
+ # Sample cURL
+ # curl --verbose -X POST https://api.github.com/repos//weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data '{"event_type": "aks-package"}'
+
+env:
+ location: eastus
+ aksRepoUserName: oracle
+ aksRepoBranchName: main
+
+jobs:
+ preflight:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Get versions of external dependencies
+ run: |
+ curl -Lo external-deps-versions.properties https://raw.githubusercontent.com/Azure/azure-javaee-iaas/main/external-deps-versions.properties
+ source external-deps-versions.properties
+ echo "azCliVersion=${AZ_CLI_VERSION}" >> $GITHUB_ENV
+ echo "bicepVersion=${BICEP_VERSION}" >> $GITHUB_ENV
+ - name: Set up bicep
+ run: |
+ curl -Lo bicep https://github.com/Azure/bicep/releases/download/${bicepVersion}/bicep-linux-x64
+ chmod +x ./bicep
+ sudo mv ./bicep /usr/local/bin/bicep
+ bicep --version
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - name: Download arm-ttk used in partner center pipeline
+ run: |
+ wget -O arm-template-toolkit.zip https://aka.ms/arm-ttk-azureapps
+ unzip arm-template-toolkit.zip -d arm-ttk
+ - name: Checkout ${{ env.aksRepoUserName }}/weblogic-azure
+ uses: actions/checkout@v2
+ with:
+ path: weblogic-azure
+ - name: Build and test weblogic-azure/weblogic-azure-aks
+ run: mvn -Pbicep -Passembly clean install -Ptemplate-validation-tests --file weblogic-azure/weblogic-azure-aks/pom.xml
+ - name: Generate artifact file name and path
+ id: artifact_file
+ run: |
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${version.wls-on-aks-azure-marketplace}' --file weblogic-azure/pom.xml --non-recursive exec:exec)
+ artifactName=wls-on-aks-azure-marketplace-$version-arm-assembly
+ unzip weblogic-azure/weblogic-azure-aks/target/$artifactName.zip -d weblogic-azure/weblogic-azure-aks/target/$artifactName
+ echo "##[set-output name=artifactName;]${artifactName}"
+ echo "##[set-output name=artifactPath;]weblogic-azure/weblogic-azure-aks/target/$artifactName"
+ - name: Archive weblogic-azure/weblogic-azure-aks template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_file.outputs.artifactPath}}
diff --git a/.github/workflows/buildWlsVm4AsArtifact.yml b/.github/workflows/buildWlsVm4AsArtifact.yml
new file mode 100644
index 000000000..ff53bc5d3
--- /dev/null
+++ b/.github/workflows/buildWlsVm4AsArtifact.yml
@@ -0,0 +1,93 @@
+# Copyright (c) 2021, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+name: Build Admin Server VM artifact
+on:
+ repository_dispatch:
+ types: [vms-admin-package]
+ workflow_dispatch:
+ inputs:
+ pidType:
+ description: 'Specify which pids to use, oracle or microsoft.'
+ required: true
+ default: 'oracle'
+
+ # Sample cURL
+ # curl --verbose -X POST https://api.github.com/repos//weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data '{"event_type": "vms-admin-package", "client_payload": {"pidType": "microsoft"}}'
+env:
+ offerName: "arm-oraclelinux-wls-admin"
+ repoName: "weblogic-azure"
+ repoOwner: ${{ github.repository_owner }}
+ ref: ${{ github.ref_name }}
+
+jobs:
+ package:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ pidType=${{ github.event.inputs.pidType }}
+ else
+ pidType=${{ github.event.client_payload.pidType }}
+ fi
+
+ if [ -z "$pidType" ]; then
+ pidType='microsoft'
+ fi
+
+ echo "##[set-output name=pidType;]${pidType}"
+ echo "pidType=${pidType}" >> $GITHUB_ENV
+
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: actions/checkout@v2.3.4
+ - name: Set dependency reference
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - name: Download arm-ttk used in partner center pipeline
+ run: |
+ wget -O arm-template-toolkit.zip https://aka.ms/arm-ttk-azureapps
+ unzip arm-template-toolkit.zip -d arm-ttk
+ - name: Checkout ${{ env.repoName }}
+ uses: actions/checkout@v2
+ with:
+ path: ${{ env.repoName }}
+
+ - name: Update utilities path location
+ run: |
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}
+ find . -name "*.json" | xargs sed -i 's|../../../../utilities|../utilities|g' $1
+ - name: Build and test ${{ env.offerName }} using ${{ env.pidType }} pids
+ run: |
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}
+ pidType=${{ env.pidType }}
+ if [[ "${pidType}" == "oracle" ]];then
+ echo "using oracle pid"
+ mvn -Ptemplate-validation-tests clean install -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ else
+ echo "using ms pid"
+ mvn -Ptemplate-validation-tests clean install -Ddev -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ fi
+
+ - name: Generate artifact file name and path
+ id: artifact_file
+ run: |
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${version.${{ env.offerName }}}' --file weblogic-azure/pom.xml --non-recursive exec:exec)
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}
+ artifactName=${{ env.offerName }}-$version-arm-assembly
+ unzip target/$artifactName.zip -d target/$artifactName
+ echo "##[set-output name=artifactName;]${artifactName}-${{ env.pidType }}"
+ echo "##[set-output name=artifactPath;]${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}/target/$artifactName"
+ - name: Archive ${{ env.offerName }} template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_file.outputs.artifactPath}}
+
diff --git a/.github/workflows/buildWlsVm4CcArtifact.yml b/.github/workflows/buildWlsVm4CcArtifact.yml
new file mode 100644
index 000000000..d8d4a6394
--- /dev/null
+++ b/.github/workflows/buildWlsVm4CcArtifact.yml
@@ -0,0 +1,94 @@
+#Copyright (c) 2021 Oracle and/or its affiliates.
+#Released under the Universal Permissive License v1.0 as shown at
+# https://oss.oracle.com/licenses/upl/
+
+name: Build Configured Cluster VM artifact
+on:
+ repository_dispatch:
+ types: [vms-configured-cluster-package]
+ workflow_dispatch:
+ inputs:
+ pidType:
+ description: 'Specify which pids to use, oracle or microsoft.'
+ required: true
+ default: 'oracle'
+
+ # Sample cURL
+ # curl --verbose -X POST https://api.github.com/repos//weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data '{"event_type": "vms-configured-cluster-package", "client_payload": {"pidType": "microsoft"} }'
+env:
+ offerName: "arm-oraclelinux-wls-cluster"
+ repoName: "weblogic-azure"
+ repoOwner: ${{ github.repository_owner }}
+ ref: ${{ github.ref_name }}
+
+jobs:
+ package:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ pidType=${{ github.event.inputs.pidType }}
+ else
+ pidType=${{ github.event.client_payload.pidType }}
+ fi
+ if [ -z "$pidType" ]; then
+ pidType='microsoft'
+ fi
+
+ echo "##[set-output name=pidType;]${pidType}"
+ echo "pidType=${pidType}" >> $GITHUB_ENV
+
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: actions/checkout@v2.3.4
+ - name: Set dependency reference
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - name: Download arm-ttk used in partner center pipeline
+ run: |
+ wget -O arm-template-toolkit.zip https://aka.ms/arm-ttk-azureapps
+ unzip arm-template-toolkit.zip -d arm-ttk
+ - name: Checkout ${{ env.repoName }}
+ uses: actions/checkout@v2
+ with:
+ path: ${{ env.repoName }}
+
+ - name: Update utilities path location
+ run: |
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}
+ find . -name "*.json" | xargs sed -i 's|../../../../../utilities|../utilities|g' $1
+ - name: Build and test ${{ env.offerName }} using ${{ env.pidType }} pids
+ run: |
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}/${{ env.offerName }}
+ pidType=${{ env.pidType }}
+ if [[ "${pidType}" == "oracle" ]];then
+ echo "using oracle pid"
+ mvn -Ptemplate-validation-tests clean install -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ else
+ echo "using ms pid"
+ mvn -Ptemplate-validation-tests clean install -Ddev -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ fi
+
+ - name: Generate artifact file name and path
+ id: artifact_file
+ run: |
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${version.${{ env.offerName }}}' --file weblogic-azure/pom.xml --non-recursive exec:exec)
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}/${{ env.offerName }}
+ artifactName=${{ env.offerName }}-$version-arm-assembly
+ unzip target/$artifactName.zip -d target/$artifactName
+ echo "##[set-output name=artifactName;]${artifactName}-${{ env.pidType }}"
+ echo "##[set-output name=artifactPath;]${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}/${{ env.offerName }}/target/$artifactName"
+ - name: Archive ${{ env.offerName }} template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_file.outputs.artifactPath}}
+
+
diff --git a/.github/workflows/buildWlsVm4DcArtifact.yml b/.github/workflows/buildWlsVm4DcArtifact.yml
new file mode 100644
index 000000000..4450d5592
--- /dev/null
+++ b/.github/workflows/buildWlsVm4DcArtifact.yml
@@ -0,0 +1,90 @@
+#Copyright (c) 2021 Oracle and/or its affiliates.
+#Released under the Universal Permissive License v1.0 as shown at
+# https://oss.oracle.com/licenses/upl/
+
+name: Build Dynamic Cluster VM artifact
+on:
+ repository_dispatch:
+ types: [vms-dynamic-cluster-package]
+ workflow_dispatch:
+ inputs:
+ pidType:
+ description: 'Specify which pids to use, oracle or microsoft.'
+ required: true
+ default: 'oracle'
+
+ # Sample cURL
+ # curl --verbose -X POST https://api.github.com/repos//weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data '{"event_type": "vms-dynamic-cluster-package", "client_payload": {"pidType": "microsoft"}}'
+env:
+ offerName: "arm-oraclelinux-wls-dynamic-cluster"
+ repoName: "weblogic-azure"
+ repoOwner: ${{ github.repository_owner }}
+ ref: ${{ github.ref_name }}
+
+
+jobs:
+ package:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ pidType=${{ github.event.inputs.pidType }}
+ else
+ pidType=${{ github.event.client_payload.pidType }}
+ fi
+ if [ -z "$pidType" ]; then
+ pidType='microsoft'
+ fi
+
+ echo "##[set-output name=pidType;]${pidType}"
+ echo "pidType=${pidType}" >> $GITHUB_ENV
+
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: actions/checkout@v2.3.4
+ - name: Download arm-ttk used in partner center pipeline
+ run: |
+ wget -O arm-template-toolkit.zip https://aka.ms/arm-ttk-azureapps
+ unzip arm-template-toolkit.zip -d arm-ttk
+ - name: Checkout ${{ env.repoName }}
+ uses: actions/checkout@v2
+ with:
+ path: ${{ env.repoName }}
+
+ - name: Update utilities path location
+ run: |
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}
+ find . -name "*.json" | xargs sed -i 's|../../../../../utilities|../utilities|g' $1
+ - name: Build and test ${{ env.offerName }} using ${{ env.pidType }} pids
+ run: |
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}/${{ env.offerName }}
+ pidType=${{ env.pidType }}
+ echo ${pidType}
+ if [[ "${pidType}" == "oracle" ]];then
+ echo "using oracle pids"
+ mvn -Ptemplate-validation-tests clean install -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ else
+ echo "using ms pids"
+ mvn -Ptemplate-validation-tests clean install -Ddev -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ fi
+
+ - name: Generate artifact file name and path
+ id: artifact_file
+ run: |
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${version.${{ env.offerName }}}' --file weblogic-azure/pom.xml --non-recursive exec:exec)
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}/${{ env.offerName }}
+ artifactName=${{ env.offerName }}-$version-arm-assembly
+ unzip target/$artifactName.zip -d target/$artifactName
+ echo "##[set-output name=artifactName;]${artifactName}-${{ env.pidType }}"
+ echo "##[set-output name=artifactPath;]${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}/${{ env.offerName }}/target/$artifactName"
+ - name: Archive ${{ env.offerName }} template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_file.outputs.artifactPath}}
diff --git a/.github/workflows/buildWlsVm4SnArtifact.yml b/.github/workflows/buildWlsVm4SnArtifact.yml
new file mode 100644
index 000000000..3ce879945
--- /dev/null
+++ b/.github/workflows/buildWlsVm4SnArtifact.yml
@@ -0,0 +1,87 @@
+# Copyright (c) 2021, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+name: Build Single Node VM artifact
+on:
+ repository_dispatch:
+ types: [vms-single-node-package]
+ workflow_dispatch:
+ inputs:
+ pidType:
+ description: 'Specify which pids to use, oracle or microsoft.'
+ required: true
+ default: 'oracle'
+
+ # Sample cURL
+ # curl --verbose -X POST https://api.github.com/repos//weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data '{"event_type": "vms-single-node-package", "client_payload": {"pidType": "microsoft"}}'
+env:
+ offerName: "arm-oraclelinux-wls"
+ repoName: "weblogic-azure"
+ repoOwner: ${{ github.repository_owner }}
+ ref: ${{ github.ref_name }}
+
+jobs:
+ package:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ pidType=${{ github.event.inputs.pidType }}
+
+ else
+ pidType=${{ github.event.client_payload.pidType }}
+
+ fi
+
+ if [ -z "$pidType" ]; then
+ pidType='microsoft'
+ fi
+
+ echo "##[set-output name=pidType;]${pidType}"
+ echo "pidType=${pidType}" >> $GITHUB_ENV
+
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: actions/checkout@v2.3.4
+ - name: Download arm-ttk used in partner center pipeline
+ run: |
+ wget -O arm-template-toolkit.zip https://aka.ms/arm-ttk-azureapps
+ unzip arm-template-toolkit.zip -d arm-ttk
+ - name: Checkout ${{ env.repoName }}
+ uses: actions/checkout@v2
+ with:
+ path: ${{ env.repoName }}
+
+ - name: Build and test ${{ env.offerName }} using ${{ env.pidType }} pids
+ run: |
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}
+ pidType=${{ env.pidType }}
+ if [[ "${pidType}" == "oracle" ]];then
+ echo "using oracle pid"
+ mvn -Ptemplate-validation-tests clean install -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ else
+ echo "using ms pid"
+ mvn -Ptemplate-validation-tests clean install -Ddev -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ fi
+
+ - name: Generate artifact file name and path
+ id: artifact_file
+ run: |
+ cd ${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
+ artifactName=${{ env.offerName }}-$version-arm-assembly
+ unzip target/$artifactName.zip -d target/$artifactName
+ echo "##[set-output name=artifactName;]${artifactName}-${{ env.pidType }}"
+ echo "##[set-output name=artifactPath;]${{env.repoName}}/weblogic-azure-vm/${{ env.offerName }}/target/$artifactName"
+ - name: Archive ${{ env.offerName }} template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_file.outputs.artifactPath}}
+
diff --git a/.github/workflows/checkARMVMSize.yml b/.github/workflows/checkARMVMSize.yml
new file mode 100644
index 000000000..ac776235d
--- /dev/null
+++ b/.github/workflows/checkARMVMSize.yml
@@ -0,0 +1,110 @@
+name: Check ARM VM Size Changes
+
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: '0 0 */14 * *' # Runs at midnight (00:00) UTC every 14 days (2 weeks)
+
+env:
+ azureCredentials: ${{ secrets.AZURE_CREDENTIALS }}
+ repoName: "weblogic-azure"
+ userEmail: ${{ secrets.USER_EMAIL }}
+ userName: ${{ secrets.USER_NAME }}
+ GH_TOKEN: ${{ secrets.GIT_TOKEN }}
+
+jobs:
+ check-vm-sizes:
+ if: github.event_name == 'workflow_dispatch' || (github.event_name == 'schedule' && github.repository_owner == 'azure-javaee')
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout weblogic-azure
+ uses: actions/checkout@v2
+ with:
+ path: weblogic-azure
+
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ env.azureCredentials }}
+
+ - name: Check for VM size changes
+ id: check_vm_sizes
+ run: |
+ ls -l ${{ env.repoName }}
+ # Path to the properties file
+ property_file="${{ env.repoName }}/resources/azure-common.properties"
+
+ # Check if the properties file exists
+ if [ ! -f "$property_file" ]; then
+ echo "Properties file '$property_file' not found."
+ exit 1
+ fi
+
+ if ! grep -q '^azure\.armBased\.vmSize\.list=' "$property_file"; then
+ echo "Line 'azure.armBased.vmSize.list' not found in $property_file."
+ echo "vm_sizes_changed=false" >> "$GITHUB_OUTPUT"
+ exit 0
+ fi
+
+ # Using grep to find the line containing azure.armBased.vmSize.list
+ vm_size_variable=$(grep '^azure\.armBased\.vmSize\.list=' "$property_file")
+ vm_size_list=${vm_size_variable#*=}
+
+ # Print the extracted value
+ echo "$vm_size_list"
+
+ latest_locations=$(az account list-locations --query '[?not_null(metadata.latitude)] .name' -o tsv)
+
+ new_sizes=""
+ for location in $latest_locations; do
+ latest_sizes=$(az vm list-sizes --location $location | jq '.[] | select(.name | contains("p")) | .name' | tr -d "\"")
+ for size in $latest_sizes; do
+ # if new_sizes does not contain size
+ if [[ $(echo "[$new_sizes]" | jq '. | index("'${size}'")') == null ]]; then
+ echo "Add size: ${size}"
+ if [ -z "$new_sizes" ]; then
+ new_sizes="\"$size\""
+ else
+ new_sizes="$new_sizes,\"$size\""
+ fi
+ fi
+ done
+ done
+
+ if [ ${#new_sizes} -ne ${#vm_size_list} ]; then
+ echo "VM sizes changed"
+ echo "vm_sizes_changed=true" >> "$GITHUB_OUTPUT"
+ else
+ echo "vm_sizes_changed=false" >> "$GITHUB_OUTPUT"
+ fi
+
+ echo "Current sizes : $new_sizes"
+ echo "latest_sizes=\"${new_sizes}\"" >> "$GITHUB_OUTPUT"
+
+ - name: Create PR if changes detected
+ if: steps.check_vm_sizes.outputs.vm_sizes_changed == 'true'
+ run: |
+ # Logic to create a pull request to update the ARM VM sizes configuration file
+ # Example: Use GitHub CLI or git commands to create a branch and push changes
+ cd ${{ env.repoName }}
+ branchName="update-vm-sizes-$(date +%s)"
+ git config --global user.email "${userEmail}"
+ git config --global user.name "${userName}"
+
+ git checkout -b ${branchName}
+ # Use sed to delete the line starting with azure.armBased.vmSize.list=
+ property_file="resources/azure-common.properties"
+ sed -i '/^azure\.armBased\.vmSize\.list=/d' "$property_file"
+ latest_sizes=$(echo ${{ steps.check_vm_sizes.outputs.latest_sizes }} | sed 's/,/","/g')
+ echo "azure.armBased.vmSize.list=\"$latest_sizes\"" >> "$property_file"
+
+ git add $property_file
+ git commit -m "Update ARM VM sizes"
+ git push origin ${branchName}
+
+ # Create a pull request
+ gh pr create --title "Update ARM VM sizes" \
+ --body "Automatic update of ARM VM sizes based on latest changes" \
+ --reviewer edburns,galiacheng \
+ --base main \
+ --head ${branchName}
diff --git a/.github/workflows/it-validation-aks.yaml b/.github/workflows/it-validation-aks.yaml
new file mode 100644
index 000000000..1147412bf
--- /dev/null
+++ b/.github/workflows/it-validation-aks.yaml
@@ -0,0 +1,46 @@
+name: IT Validation for AKS
+run-name: Running validation workflows with plan:${{ github.event_name == 'schedule' && 'plan-aks' || inputs.it_plan }}
+
+on:
+ schedule:
+ - cron: '0 6 * * 1' # Every Monday at 06:00 UTC
+ workflow_dispatch:
+ inputs:
+ it_plan:
+ description: 'Path to the validation plan file'
+ required: true
+ type: choice
+ options:
+ - plan-aks
+ default: plan-aks
+
+jobs:
+ execute-validation:
+ runs-on: ubuntu-latest
+ outputs:
+ results: ${{ steps.it-validation.outputs.results }}
+ report_url: ${{ steps.it-validation.outputs.report_url }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set validation plan file
+ id: set-plan-file
+ run: |
+ case "${{ inputs.it_plan || 'plan-aks' }}" in
+ plan-aks)
+ IT_FILE=".github/it/validation-plan-aks.json"
+ ;;
+ *)
+ echo "Unknown plan option: ${{ inputs.it_plan }}"
+ exit 1
+ ;;
+ esac
+ echo "it_file=$IT_FILE" >> $GITHUB_OUTPUT
+
+ - name: Execute IT Validation
+ id: it-validation
+ uses: ./.github/actions/it
+ with:
+ it_file: ${{ steps.set-plan-file.outputs.it_file }}
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/it-validation-build.yaml b/.github/workflows/it-validation-build.yaml
new file mode 100644
index 000000000..5700d1d7c
--- /dev/null
+++ b/.github/workflows/it-validation-build.yaml
@@ -0,0 +1,46 @@
+name: IT Validation for Build
+run-name: Running validation workflows with plan:${{ github.event_name == 'schedule' && 'plan-build' || inputs.it_plan }}
+
+on:
+ schedule:
+ - cron: '0 2 * * *' # Runs daily at 2:00 AM UTC
+ workflow_dispatch:
+ inputs:
+ it_plan:
+ description: 'Path to the validation plan file'
+ required: true
+ type: choice
+ options:
+ - plan-build
+ default: plan-build
+
+jobs:
+ execute-validation:
+ runs-on: ubuntu-latest
+ outputs:
+ results: ${{ steps.it-validation.outputs.results }}
+ report_url: ${{ steps.it-validation.outputs.report_url }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set validation plan file
+ id: set-plan-file
+ run: |
+ case "${{ inputs.it_plan || 'plan-build' }}" in
+ plan-build)
+ IT_FILE=".github/it/validation-plan-build.json"
+ ;;
+ *)
+ echo "Unknown plan option: ${{ inputs.it_plan }}"
+ exit 1
+ ;;
+ esac
+ echo "it_file=$IT_FILE" >> $GITHUB_OUTPUT
+
+ - name: Execute IT Validation
+ id: it-validation
+ uses: ./.github/actions/it
+ with:
+ it_file: ${{ steps.set-plan-file.outputs.it_file }}
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/it-validation-vm-admin.yaml b/.github/workflows/it-validation-vm-admin.yaml
new file mode 100644
index 000000000..45eff4ab5
--- /dev/null
+++ b/.github/workflows/it-validation-vm-admin.yaml
@@ -0,0 +1,46 @@
+name: IT Validation for VM Admin
+run-name: Running validation workflows with plan:${{ github.event_name == 'schedule' && 'plan-vm-admin' || inputs.it_plan }}
+
+on:
+ schedule:
+ - cron: '0 1 * * 1' # Every Monday at 01:00 UTC
+ workflow_dispatch:
+ inputs:
+ it_plan:
+ description: 'Path to the validation plan file'
+ required: true
+ type: choice
+ options:
+ - plan-vm-admin
+ default: plan-vm-admin
+
+jobs:
+ execute-validation:
+ runs-on: ubuntu-latest
+ outputs:
+ results: ${{ steps.it-validation.outputs.results }}
+ report_url: ${{ steps.it-validation.outputs.report_url }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set validation plan file
+ id: set-plan-file
+ run: |
+ case "${{ inputs.it_plan || 'plan-vm-admin' }}" in
+ plan-vm-admin)
+ IT_FILE=".github/it/validation-plan-vm-admin.json"
+ ;;
+ *)
+ echo "Unknown plan option: ${{ inputs.it_plan }}"
+ exit 1
+ ;;
+ esac
+ echo "it_file=$IT_FILE" >> $GITHUB_OUTPUT
+
+ - name: Execute IT Validation
+ id: it-validation
+ uses: ./.github/actions/it
+ with:
+ it_file: ${{ steps.set-plan-file.outputs.it_file }}
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/it-validation-vm-cluster.yaml b/.github/workflows/it-validation-vm-cluster.yaml
new file mode 100644
index 000000000..86e0820ad
--- /dev/null
+++ b/.github/workflows/it-validation-vm-cluster.yaml
@@ -0,0 +1,46 @@
+name: IT Validation for VM Cluster
+run-name: Running validation workflows with plan:${{ github.event_name == 'schedule' && 'plan-vm-cluster' || inputs.it_plan }}
+
+on:
+ schedule:
+ - cron: '0 11 * * 1' # Runs Every Monday at 11:00 UTC
+ workflow_dispatch:
+ inputs:
+ it_plan:
+ description: 'Path to the validation plan file'
+ required: true
+ type: choice
+ options:
+ - plan-vm-cluster
+ default: plan-vm-cluster
+
+jobs:
+ execute-validation:
+ runs-on: ubuntu-latest
+ outputs:
+ results: ${{ steps.it-validation.outputs.results }}
+ report_url: ${{ steps.it-validation.outputs.report_url }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set validation plan file
+ id: set-plan-file
+ run: |
+ case "${{ inputs.it_plan || 'plan-vm-cluster' }}" in
+ plan-vm-cluster)
+ IT_FILE=".github/it/validation-plan-vm-cluster.json"
+ ;;
+ *)
+ echo "Unknown plan option: ${{ inputs.it_plan }}"
+ exit 1
+ ;;
+ esac
+ echo "it_file=$IT_FILE" >> $GITHUB_OUTPUT
+
+ - name: Execute IT Validation
+ id: it-validation
+ uses: ./.github/actions/it
+ with:
+ it_file: ${{ steps.set-plan-file.outputs.it_file }}
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/it-validation-vm-dynamic-cluster.yaml b/.github/workflows/it-validation-vm-dynamic-cluster.yaml
new file mode 100644
index 000000000..a7ed8e3c2
--- /dev/null
+++ b/.github/workflows/it-validation-vm-dynamic-cluster.yaml
@@ -0,0 +1,46 @@
+name: IT Validation for VM Dynamic Cluster
+run-name: Running validation workflows with plan:${{ github.event_name == 'schedule' && 'plan-vm-dynamic-cluster' || inputs.it_plan }}
+
+on:
+ schedule:
+ - cron: '0 15 * * 1' # Runs Every Monday at 15:00 UTC
+ workflow_dispatch:
+ inputs:
+ it_plan:
+ description: 'Path to the validation plan file'
+ required: true
+ type: choice
+ options:
+ - plan-vm-dynamic-cluster
+ default: plan-vm-dynamic-cluster
+
+jobs:
+ execute-validation:
+ runs-on: ubuntu-latest
+ outputs:
+ results: ${{ steps.it-validation.outputs.results }}
+ report_url: ${{ steps.it-validation.outputs.report_url }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set validation plan file
+ id: set-plan-file
+ run: |
+ case "${{ inputs.it_plan || 'plan-vm-dynamic-cluster' }}" in
+ plan-vm-dynamic-cluster)
+ IT_FILE=".github/it/validation-plan-vm-dynamic-cluster.json"
+ ;;
+ *)
+ echo "Unknown plan option: ${{ inputs.it_plan }}"
+ exit 1
+ ;;
+ esac
+ echo "it_file=$IT_FILE" >> $GITHUB_OUTPUT
+
+ - name: Execute IT Validation
+ id: it-validation
+ uses: ./.github/actions/it
+ with:
+ it_file: ${{ steps.set-plan-file.outputs.it_file }}
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/newtag.yml b/.github/workflows/newtag.yml
new file mode 100644
index 000000000..c605d2235
--- /dev/null
+++ b/.github/workflows/newtag.yml
@@ -0,0 +1,151 @@
+name: New Tag
+on:
+ workflow_dispatch:
+ inputs:
+ tagname:
+ description: 'Specify Tag name to create/update.'
+ required: true
+ default: '2021-12-10-01-Q4'
+ ref:
+ description: 'Specify Git Ref if needed.'
+ required: false
+ default: 'refs/heads/main'
+ repository_dispatch:
+ types: [gh-pages-newtag]
+ # sample cURL
+ # curl --verbose -X POST https://api.github.com/repos//weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data ''
+ # sample
+ # {"event_type": "gh-pages-newtag", "client_payload": {"tagname": "2021-12-09-02-Q4", "ref": "refs/heads/main" }}
+
+env:
+ tagbranch: "tagbranch"
+ gitToken: ${{ secrets.GIT_TOKEN }}
+ repoName: "weblogic-azure"
+ userEmail: ${{ secrets.USER_EMAIL }}
+ userName: ${{ secrets.USER_NAME }}
+ repoOwner: ${{ github.repository_owner }}
+
+jobs:
+ newtag:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ tagname=${{ github.event.inputs.tagname }}
+ ref=${{ github.event.inputs.ref }}
+ else
+ tagname=${{ github.event.client_payload.tagname }}
+ ref=${{ github.event.client_payload.ref }}
+ fi
+
+ if [ -z "$tagname" ]; then
+ tagname=${userName}`date +%m%d`
+ fi
+
+ if [ -z "$ref" ]; then
+ ref='refs/heads/main'
+ fi
+
+ echo "##[set-output name=tagname;]${tagname}"
+ echo "##[set-output name=ref;]${ref}"
+ echo "tagname=${tagname}" >> $GITHUB_ENV
+ echo "ref=${ref}" >> $GITHUB_ENV
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: actions/checkout@v2.3.4
+ - name: Set dependency reference
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - name: Get versions of external dependencies
+ id: get-external-dependencies-version
+ run: |
+ curl -Lo external-deps-versions.properties https://raw.githubusercontent.com/Azure/azure-javaee-iaas/main/external-deps-versions.properties
+ source external-deps-versions.properties
+ echo "bicepVersion=${BICEP_VERSION}" >> $GITHUB_ENV
+ - name: Checkout ${{ env.repoName }}
+ uses: actions/checkout@v2
+ with:
+ path: ${{ env.repoName }}
+ ref: ${{ env.ref }}
+ token: ${{ env.gitToken }}
+ - name: Checkout arm-ttk
+ uses: actions/checkout@v2
+ with:
+ repository: Azure/arm-ttk
+ path: arm-ttk
+ ref: ${{ env.refArmttk }}
+ - name: Set up bicep
+ run: |
+ curl -Lo bicep https://github.com/Azure/bicep/releases/download/${bicepVersion}/bicep-linux-x64
+ chmod +x ./bicep
+ sudo mv ./bicep /usr/local/bin/bicep
+ bicep --version
+ - name: Build ${{ env.repoName }}
+ run: |
+ cd ${{ env.repoName }}
+ mvn -Ptemplate-validation-tests clean install --file weblogic-azure-vm/pom.xml -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+
+ mvn -Ptemplate-validation-tests -Pbicep clean install --file weblogic-azure-aks/pom.xml -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ ls weblogic-azure-aks/target/bicep
+ bicep build weblogic-azure-aks/target/bicep/mainTemplate.bicep --outfile weblogic-azure-aks/src/main/arm/mainTemplate.json
+ bicep build weblogic-azure-aks/target/bicep/modules/setupDBConnection.bicep --outfile weblogic-azure-aks/src/main/arm/dbTemplate.json
+ bicep build weblogic-azure-aks/target/bicep/modules/updateWebLogicApplications.bicep --outfile weblogic-azure-aks/src/main/arm/updateAppTemplate.json
+
+ - name: Create new tag
+ run: |
+ cd ${{ env.repoName }}
+
+ git config --global core.longpaths true
+ git config --global user.email $userEmail
+ git config --global user.name $userName
+
+ authGitPath=https://$gitToken@github.com/${GITHUB_REPOSITORY}.git
+
+ echo "Create tag branch"
+ remoteBranches=$(git ls-remote --heads)
+ echo ${remoteBranches}
+ if [[ -n `echo ${remoteBranches} | grep "${tagbranch}"` ]]; then
+ git push ${authGitPath} --delete ${tagbranch} -f
+ fi
+
+ if [[ -n `git branch --all | grep "${tagbranch}"` ]]; then
+ git branch -D ${tagbranch}
+ fi
+
+ git checkout -b ${tagbranch}
+
+ # replace pids
+ list=$(find weblogic-azure-vm -name "*.json" | grep "\/target\/")
+ for file in ${list}; do
+ sourcePath=$(echo "$file" | sed "s:target:src/main:g")
+ if test -f "$sourcePath"; then
+ echo "Replace ${sourcePath} with ${file}"
+ cp -f $file $sourcePath
+ fi
+ done
+
+ git status
+ git add --all
+ git commit -m "hard code pids"
+ git fetch --unshallow
+ git push ${authGitPath} ${tagbranch} -f
+
+ # remove existing tag
+ if [[ -n `git ls-remote --tags | grep "${tagname}"` ]]; then
+ git push ${authGitPath} --delete ${tagname} -f
+ fi
+
+ # create new tag
+ git tag ${tagname}
+ git push ${authGitPath} ${tagname} -f
+ git remote add upstream https://$gitToken@github.com/oracle/weblogic-azure.git
+ # ignore the error if cannot push, but log it
+ push_upstream_ignore_failure () { echo "push upstream result: $?"; return 0; }
+ git push upstream ${tagname} -f || push_upstream_ignore_failure
+ git push ${authGitPath} --delete ${tagbranch} -f
diff --git a/.github/workflows/setup-for-wls-aks.sh b/.github/workflows/setup-for-wls-aks.sh
new file mode 100644
index 000000000..50a147246
--- /dev/null
+++ b/.github/workflows/setup-for-wls-aks.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+################################################
+# This script is invoked by a human who:
+# - has done az login.
+# - can create repository secrets in the github repo from which this file was cloned.
+# - has the gh client >= 2.0.0 installed.
+# - has yq 4.x installed.
+#
+# This script initializes the repo from which this file is was cloned
+# with the necessary secrets to run the workflows.
+# Steps to run the Script:
+# 1. Run az login.
+# 2. Run gh auth login.
+# 3. Clone the repository.
+# 4. Prepare the .github/resource/credentials-params-wls-aks.yaml file with the required parameters.
+# 5. Run the script with the following command:
+# ```
+# cd .github/workflows
+# bash setup-for-wls-aks.sh
+# ```
+# 6. The script will set the required secrets in the repository.
+# 7. Check the repository secrets to verify that the secrets are set.
+################################################
+
+set -Eeuo pipefail
+
+source ../resource/pre-check.sh
+## Set environment variables
+export param_file="../resource/credentials-params-wls-aks.yaml"
+source ../resource/credentials-params-setup.sh
+source ../resource/azure-credential-setup-wls-aks.sh
diff --git a/.github/workflows/setup-for-wls-vm.sh b/.github/workflows/setup-for-wls-vm.sh
new file mode 100644
index 000000000..1cb3d71eb
--- /dev/null
+++ b/.github/workflows/setup-for-wls-vm.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+################################################
+# This script is invoked by a human who:
+# - has done az login.
+# - can create repository secrets in the github repo from which this file was cloned.
+# - has the gh client >= 2.0.0 installed.
+# - has yq 4.x installed.
+#
+# This script initializes the repo from which this file is was cloned
+# with the necessary secrets to run the workflows.
+# Steps to run the Script:
+# 1. Run az login.
+# 2. Run gh auth login.
+# 3. Clone the repository.
+# 4. Prepare the .github/resource/credentials-params-wls-vm.yaml file with the required parameters.
+# 5. Run the script with the following command:
+# ```
+# cd .github/workflows
+# bash setup-for-wls-vm.sh
+# ```
+# 6. The script will set the required secrets in the repository.
+# 7. Check the repository secrets to verify that the secrets are set.
+################################################
+
+set -Eeuo pipefail
+
+source ../resource/pre-check.sh
+## Set environment variables
+export param_file="../resource/credentials-params-wls-vm.yaml"
+source ../resource/credentials-params-setup.sh
+source ../resource/azure-credential-setup-wls-vm.sh
diff --git a/.github/workflows/setupWlsAksDependency.yml b/.github/workflows/setupWlsAksDependency.yml
new file mode 100644
index 000000000..72f87d3d4
--- /dev/null
+++ b/.github/workflows/setupWlsAksDependency.yml
@@ -0,0 +1,108 @@
+name: Setup DB and Storage Account
+
+on:
+ workflow_dispatch:
+ # Sample cURL
+ # curl --verbose -X POST https://api.github.com/repos//weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data '{"event_type": "aks-deploy-dependency"}'
+ repository_dispatch:
+ types: [aks-deploy-dependency]
+
+env:
+ azureCredentials: ${{ secrets.AZURE_CREDENTIALS }}
+ location: eastus
+ dbAdminUser: weblogic
+ dbPassword: ${{ secrets.DB_PASSWORD }}
+ dbName: wlsdb${{ github.run_id }}${{ github.run_number }}
+ dbServerName: weblogicdb
+ resourceGroupForDB: wlsd-db-${{ github.run_id }}-${{ github.run_number }}
+ resourceGroupForStorageAccount: wlsd-sa-${{ github.run_id }}-${{ github.run_number }}
+ storageAccountName: wlsdsa${{ github.run_id }}${{ github.run_number }}
+ storageContainerName: wlsdcon${{ github.run_id }}${{ github.run_number }}
+
+jobs:
+ deploy-db:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2.3.4
+ - name: Set AZ CLI Version
+ id: set-az-cli-version
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ env.azureCredentials }}
+ - name: Create Resource Group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "create resource group" ${{ env.resourceGroupForDB }}
+ az group create --verbose --name ${{ env.resourceGroupForDB }} --location ${{ env.location }}
+
+ - uses: actions/checkout@v2.3.4
+ - name: Set up PostgreSQL Flexible Server that allows access from Azure services
+ uses: ./.github/actions/createPostgresqlFlexibleServer
+ with:
+ dbAdminUser: ${{ env.dbAdminUser }}
+ dbName: ${{ env.dbName }}
+ dbPassword: ${{ env.dbPassword }}
+ dbServerName: ${{ env.dbServerName }}
+ location: ${{ env.location }}
+ resourceGroupName: ${{ env.resourceGroupForDB }}
+ deploy-storage-account:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2.3.4
+ - name: Set AZ CLI Version
+ id: set-az-cli-version
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ env.azureCredentials }}
+ - name: Create Resource Group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "create resource group" ${{ env.resourceGroupForStorageAccount }}
+ az group create --verbose --name ${{ env.resourceGroupForStorageAccount }} --location ${{ env.location }}
+ - name: Create Storage Account
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ az storage account create --name ${{ env.storageAccountName }} \
+ --resource-group ${{ env.resourceGroupForStorageAccount }} \
+ --location ${{ env.location }} \
+ --sku Standard_LRS \
+ --kind StorageV2
+ - name: Create Storage Container
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ az storage container create -n ${{ env.storageContainerName }} --account-name ${{ env.storageAccountName }}
+ format-db-sa-parameters-for-integration-test:
+ needs: [deploy-storage-account, deploy-db]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Generate integration-test parameter json
+ id: artifact_file
+ run: |
+ cat <<EOF >integration-test-data.txt
+ # sample request
+ curl --verbose -X POST https://api.github.com/repos/${{ github.repository_owner }}/weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data ''
+ # copy the JSON as
+ {"event_type": "aks-integration-test-without-dependency-creation", "client_payload": {"gitUserNameForArtifactsLocation": "${{ github.repository_owner }}", "testBranchNameForArtifactsLocation": "${{ github.ref }}", "isForDemo": "false", "disambiguationSuffix": "${{ github.run_id }}", "storageAccountName": "${{ env.storageAccountName }}", "storageContainerName": "${{ env.storageContainerName }}", "dbName": "${{ env.dbName }}"}}
+ EOF
+ - name: Archive integration-test-data.txt
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: integration-test-data
+ path: integration-test-data.txt
diff --git a/.github/workflows/syncupWithUpstream.yml b/.github/workflows/syncupWithUpstream.yml
new file mode 100644
index 000000000..35c9df677
--- /dev/null
+++ b/.github/workflows/syncupWithUpstream.yml
@@ -0,0 +1,31 @@
+name: Merge upstream branches for WLS on AKS
+on:
+ workflow_dispatch:
+ # Sample cURL
+ # curl --verbose -X POST https://api.github.com/repos//weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data '{"event_type": "aks-upstream-sync"}'
+ repository_dispatch:
+ types: [aks-upstream-sync]
+env:
+ userName: ${{ secrets.USER_NAME }}
+ userEmail: ${{ secrets.USER_EMAIL }}
+ gitToken: ${{ secrets.GIT_TOKEN }}
+jobs:
+ merge:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ token: ${{ env.gitToken }}
+ - name: Merge upstream
+ run: |
+ git config --global user.name ${{ env.userName }}
+ git config --global user.email ${{ env.userEmail }}
+
+ # "git checkout main" is unnecessary, already here by default
+ git pull --unshallow
+
+ git remote add upstream https://github.com/oracle/weblogic-azure.git
+ git fetch upstream
+
+ git merge --no-edit upstream/main
+ git push origin main
diff --git a/.github/workflows/teardown-for-wls-aks.sh b/.github/workflows/teardown-for-wls-aks.sh
new file mode 100644
index 000000000..25fada69b
--- /dev/null
+++ b/.github/workflows/teardown-for-wls-aks.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+################################################
+# This script is invoked by a human who:
+# - can remove repository secrets in the github repo from which this file was cloned.
+# - has the gh client >= 2.0.0 installed.
+# - has yq 4.x installed.
+#
+# This script initializes the repo from which this file is was cloned
+# with the necessary secrets to run the workflows.
+# Steps to run the Script:
+# 1. Run gh auth login.
+# 2. Clone the repository.
+# 3. Run the script with the following command:
+# ```
+# cd .github/workflows
+# bash teardown-for-wls-aks.sh
+# ```
+# 4. The script will remove the required secrets in the repository.
+# 5. Check the repository secrets to verify that the secrets are removed.
+################################################
+
+set -Eeuo pipefail
+
+source ../resource/pre-check.sh
+## Set environment variables
+export param_file="../resource/credentials-params-wls-aks.yaml"
+source ../resource/credentials-params-teardown.sh
+source ../resource/azure-credential-teardown-wls-aks.sh
diff --git a/.github/workflows/teardown-for-wls-vm.sh b/.github/workflows/teardown-for-wls-vm.sh
new file mode 100644
index 000000000..f05889971
--- /dev/null
+++ b/.github/workflows/teardown-for-wls-vm.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+################################################
+# This script is invoked by a human who:
+# - can remove repository secrets in the github repo from which this file was cloned.
+# - has the gh client >= 2.0.0 installed.
+# - has yq 4.x installed.
+#
+# This script initializes the repo from which this file is was cloned
+# with the necessary secrets to run the workflows.
+# Steps to run the Script:
+# 1. Run gh auth login.
+# 2. Clone the repository.
+# 3. Run the script with the following command:
+# ```
+# cd .github/workflows
+# bash teardown-for-wls-vm.sh
+# ```
+# 4. The script will remove the required secrets in the repository.
+# 5. Check the repository secrets to verify that the secrets are removed.
+################################################
+
+set -Eeuo pipefail
+
+source ../resource/pre-check.sh
+## Set environment variables
+export param_file="../resource/credentials-params-wls-vm.yaml"
+source ../resource/credentials-params-teardown.sh
+source ../resource/azure-credential-teardown-wls-vm.sh
diff --git a/.github/workflows/testWlsAksWithDependencyCreation.yml b/.github/workflows/testWlsAksWithDependencyCreation.yml
new file mode 100644
index 000000000..e2a249565
--- /dev/null
+++ b/.github/workflows/testWlsAksWithDependencyCreation.yml
@@ -0,0 +1,550 @@
+name: Test WLS on AKS with Dependency creation
+run-name: Test WLS on AKS with Dependency creation with db:${{ inputs.databaseType || 'postgresql(flexible)' }}
+
+on:
+ workflow_dispatch:
+ inputs:
+ isForDemo:
+ description: 'If set to true, resources will not be deleted'
+ required: true
+ default: 'false'
+ vmSize:
+ description: 'The VM size for the AKS pool'
+ required: true
+ default: Standard_D2s_v3
+ location:
+ description: 'The location for the resources'
+ required: true
+ default: centralus
+ databaseType:
+ description: 'Database connection'
+ required: true
+ default: 'postgresql(flexible)'
+ type: choice
+ options:
+ - postgresql(flexible)
+ - postgresql-passwordless(flexible)
+ configurations_for_it:
+ description: "JSON string of environment variables used for IT"
+ required: false
+ default: '{}'
+ # sample cURL
+ # curl --verbose -X POST https://api.github.com/repos//weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token ' --data '{"event_type": "aks-integration-test-with-dependency-creation", "client_payload": {"isForDemo": "false"}}'
+ repository_dispatch:
+ types: [aks-integration-test-with-dependency-creation,integration-test-all]
+
+env:
+ azureCredentials: ${{ secrets.AZURE_CREDENTIALS }}
+ resourceGroupForWlsAks: wlsd-aks-${{ github.run_id }}-${{ github.run_number }}
+ dbAdminUser: weblogic
+ dbPassword: ${{ secrets.DB_PASSWORD }}
+ dbName: wlsdb${{ github.run_id }}${{ github.run_number }}
+ dbServerName: db${{ github.run_id }}${{ github.run_number }}
+ uamiName: uami${{ github.run_id }}${{ github.run_number }}
+ ocrSSOUser: ${{ secrets.ORC_SSOUSER }}
+ ocrSSOPSW: ${{ secrets.ORC_SSOPSW }}
+ wdtRuntimePassword: ${{ secrets.WDT_RUNTIMEPSW}}
+ wlsUserName: ${{ secrets.WLS_USERNAME }}
+ wlsPassword: ${{ secrets.WLS_PSW }}
+ resourceGroupForDB: wlsd-db-${{ github.repository_owner }}-${{ github.run_id }}-${{ github.run_number }}
+ resourceGroupForStorageAccount: wlsd-sa-${{ github.run_id }}-${{ github.run_number }}
+ storageAccountName: wlsdsa${{ github.run_id }}${{ github.run_number }}
+ storageContainerName: wlsdcon${{ github.run_id }}${{ github.run_number }}
+ wlsImageTag: "14.1.2.0-generic-jdk17-ol9"
+
+jobs:
+ preflight:
+ outputs:
+ artifactName: ${{steps.artifact_file.outputs.artifactName}}
+ isForDemo: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.isForDemo }}
+ azCliVersion: ${{steps.set-az-cli-version.outputs.azCliVersion}}
+ vmSize: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.vmSize }}
+ location: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.location }}
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2.3.4
+ - name: Set AZ CLI Version and save in variable azCliVersion
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - name: Output Az CLi version
+ id: set-az-cli-version
+ run: |
+ echo "azCliVersion=${azCliVersion}" >> $GITHUB_OUTPUT
+ - name: Get versions of external dependencies
+ id: get-external-dependencies-version
+ run: |
+ curl -Lo external-deps-versions.properties https://raw.githubusercontent.com/Azure/azure-javaee-iaas/main/external-deps-versions.properties
+ source external-deps-versions.properties
+ echo "bicepVersion=${BICEP_VERSION}" >> $GITHUB_ENV
+ echo "refArmttk=${ARM_TTK_REFERENCE}" >> $GITHUB_ENV
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ location=centralus # default value
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ isForDemo=${{ github.event.inputs.isForDemo }}
+ vmSize=${{ github.event.inputs.vmSize }}
+ location=${{ github.event.inputs.location }}
+ else
+ isForDemo=${{ github.event.client_payload.isForDemo }}
+ vmSize=${{ github.event.client_payload.vmSize }}
+ location=${{ github.event.client_payload.location }}
+ fi
+
+ echo "##[set-output name=isForDemo;]${isForDemo}"
+ echo "##[set-output name=vmSize;]${vmSize}"
+ echo "##[set-output name=location;]${location}"
+
+ echo "isForDemo=${isForDemo}" >> $GITHUB_ENV
+ echo "vmSize=${vmSize}" >> $GITHUB_ENV
+ echo "location=${location}" >> $GITHUB_ENV
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - name: Set up bicep
+ run: |
+ curl -Lo bicep https://github.com/Azure/bicep/releases/download/${bicepVersion}/bicep-linux-x64
+ chmod +x ./bicep
+ sudo mv ./bicep /usr/local/bin/bicep
+ bicep --version
+ - name: Checkout arm-ttk
+ uses: actions/checkout@v2
+ with:
+ repository: Azure/arm-ttk
+ path: arm-ttk
+ ref: ${{ env.refArmttk }}
+ - name: Checkout weblogic-azure
+ uses: actions/checkout@v2
+ with:
+ path: weblogic-azure
+ - name: Build and test weblogic-azure/weblogic-azure-aks
+ run: mvn -Pbicep -Passembly clean install -Ptemplate-validation-tests --file weblogic-azure/weblogic-azure-aks/pom.xml
+ - name: Generate artifact file name and path
+ id: artifact_file
+ run: |
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${version.wls-on-aks-azure-marketplace}' --file weblogic-azure/pom.xml --non-recursive exec:exec)
+ artifactName=wls-on-aks-azure-marketplace-$version-arm-assembly
+ unzip weblogic-azure/weblogic-azure-aks/target/$artifactName.zip -d weblogic-azure/weblogic-azure-aks/target/$artifactName
+ echo "##[set-output name=artifactName;]${artifactName}"
+ echo "##[set-output name=artifactPath;]weblogic-azure/weblogic-azure-aks/target/$artifactName"
+ - name: Archive weblogic-azure/weblogic-azure-aks template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_file.outputs.artifactPath}}
+
+ deploy-storage-account:
+ needs: preflight
+ runs-on: ubuntu-latest
+ steps:
+ - name: Get AZ CLI Version
+ run: |
+ echo "azCliVersion=${{needs.preflight.outputs.azCliVersion}}" >> $GITHUB_ENV
+ echo "location=${{needs.preflight.outputs.location}}" >> $GITHUB_ENV
+ - name: Checkout Azure-Samples/cargotracker-wls-aks
+ uses: actions/checkout@v2
+ with:
+ repository: Azure-Samples/cargotracker-wls-aks
+ path: cargotracker
+ - uses: actions/setup-java@v4
+ with:
+ distribution: 'microsoft'
+ java-version: '11'
+ - run: mvn clean install -PweblogicOnAks --file cargotracker/pom.xml
+ - name: Query version string for deployment verification
+ run: |
+ PROPERTY_FILE="cargotracker/target/cargo-tracker/WEB-INF/classes/org/eclipse/cargotracker/messages.properties"
+ PROP_KEY=versionString
+ deployVersion=$(cat $PROPERTY_FILE | grep "$PROP_KEY" | cut -d '=' -f 2)
+ echo "deployVersion=${deployVersion}" >> $GITHUB_ENV
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ env.azureCredentials }}
+ - name: Create Resource Group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "create resource group" ${{ env.resourceGroupForStorageAccount }}
+ az group create --verbose --name ${{ env.resourceGroupForStorageAccount }} --location $location
+ - name: Create Storage Account
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ az storage account create --name ${{ env.storageAccountName }} \
+ --resource-group ${{ env.resourceGroupForStorageAccount }} \
+ --location $location \
+ --sku Standard_LRS \
+ --kind StorageV2
+ - name: Create Storage Container
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ az storage container create -n ${{ env.storageContainerName }} --account-name ${{ env.storageAccountName }}
+ - name: Upload built web app war file
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ az storage blob upload --account-name ${{ env.storageAccountName }} --container-name ${{ env.storageContainerName }} --file cargotracker/target/cargo-tracker.war --name cargo-tracker.war
+ deploy-wls-on-aks:
+ needs: [deploy-storage-account, preflight]
+ runs-on: ubuntu-latest
+ env: ${{ fromJson(inputs.configurations_for_it) }}
+ steps:
+ - name: Get AZ CLI Version
+ run: |
+ echo "azCliVersion=${{needs.preflight.outputs.azCliVersion}}" >> $GITHUB_ENV
+ echo "location=${{needs.preflight.outputs.location}}" >> $GITHUB_ENV
+ - name: Checkout weblogic-azure
+ uses: actions/checkout@v4
+ with:
+ path: weblogic-azure
+ - name: Download artifact for deployment
+ uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
+ with:
+ name: ${{needs.preflight.outputs.artifactName}}
+ path: ${{needs.preflight.outputs.artifactName}}
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ env.azureCredentials }}
+ - name: Query web app blob url and set to env
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ sasTokenValidTime=3600
+ expiryData=$(( `date +%s`+${sasTokenValidTime}))
+ sasEnd=`date -d@"$expiryData" -u '+%Y-%m-%dT%H:%MZ'`
+ sasToken=$(az storage account generate-sas \
+ --permissions r \
+ --account-name ${{ env.storageAccountName }} \
+ --services b \
+ --resource-types sco \
+ --expiry $sasEnd -o tsv)
+ cargoTrackerBlobUrl=$(az storage blob url \
+ --container-name ${{ env.storageContainerName }} \
+ --name cargo-tracker.war \
+ --account-name ${{ env.storageAccountName }} \
+ --sas-token ${sasToken} -o tsv)
+
+ echo "cargoTrackerBlobUrl=${cargoTrackerBlobUrl}" >> $GITHUB_ENV
+ - name: Create Resource Group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "create resource group" ${{ env.resourceGroupForWlsAks }}
+ az group create --verbose --name ${{ env.resourceGroupForWlsAks }} --location $location
+ - name: Checkout Azure-Samples/cargotracker-wls-aks
+ uses: actions/checkout@v2
+ with:
+ repository: Azure-Samples/cargotracker-wls-aks
+ path: cargotracker
+
+ - name: Provision Azure Vnet
+ id: vnet-provision
+ if: ${{ env.newOrExistingVnetForApplicationGateway == 'existing' }}
+ run: |
+ echo "Provisioning Azure Vnet with subnet"
+ az network vnet create \
+ --resource-group ${{ env.resourceGroupForWlsAks }} \
+ --name ${{ env.vnetForApplicationGateway.name }} \
+ --address-prefix 10.0.0.0/28 \
+ --subnet-name ${{ env.vnetForApplicationGateway.subnets.gatewaySubnet.name }} \
+ --subnet-prefix 10.0.0.0/29
+
+ - name: Create Database Resource Group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "create database resource group" ${{ env.resourceGroupForDB }}
+ az group create --verbose --name ${{ env.resourceGroupForDB }} --location $location
+
+ - name: Provision database
+ id: database-provision
+ uses: ./weblogic-azure/.github/actions/database-provision
+ with:
+ databaseType: ${{ inputs.databaseType }}
+ resourceGroup: ${{ env.resourceGroupForDB }}
+ uamiName: ${{ env.uamiName }}
+ location: ${{ env.location }} # with: values are not shell-expanded, so a bare $location would be passed literally
+ dbInstanceName: ${{ env.dbServerName }}
+ dbPassword: ${{ env.dbPassword }}
+ databaseName: ${{ env.dbName }}
+ dbAdminUser: ${{ env.dbAdminUser }}
+
+ - name: Get database parameters
+ id: database-parameters
+ uses: ./weblogic-azure/.github/actions/database-parameters
+ with:
+ databaseType: ${{ inputs.databaseType }}
+ uamiId: ${{ steps.database-provision.outputs.uamiId }}
+ serverHost: ${{ steps.database-provision.outputs.serverHost }}
+ dbInstanceName: ${{ env.dbServerName }}
+ databaseName: ${{ env.dbName }}
+ dbAdminUser: ${{ env.dbAdminUser }}
+
+ - name: Prepare parameter file
+ run: |
+ if ${{ env.createAKSCluster == 'false' }}; then
+ echo "Deploy with an existing AKS cluster"
+ export createAKSCluster=false
+ # the env aksClusterName is set in the `validation-plan-aks.json` file.
+ export aksClusterName=${{ env.aksClusterName }}
+ export aksClusterRGName=${{ env.resourceGroupForWlsAks }}
+ else
+ echo "Deploy with a new AKS cluster"
+ export createAKSCluster=true
+ export aksClusterName="aks-cluster-${{ github.run_id }}-${{ github.run_number }}"
+ export aksClusterRGName=${{ env.resourceGroupForWlsAks }}
+ fi
+
+ if ${{ env.enableAppGWIngress == 'false' }}; then
+ echo "Application Gateway Ingress Controller is disabled"
+ export enableAppGWIngress=false
+ else
+ echo "Application Gateway Ingress Controller is enabled"
+ export enableAppGWIngress=true
+ fi
+
+ # prepare parameters for vnet and application gateway
+ export newOrExistingVnetForApplicationGateway=${{ env.newOrExistingVnetForApplicationGateway }}
+ export vnetForApplicationGateway=${{ env.vnetForApplicationGateway.name }}
+ export vnetRGNameForApplicationGateway=${{ env.resourceGroupForWlsAks }}
+
+ echo "generate parameter file"
+ export databaseType='${{ steps.database-parameters.outputs.databaseType }}'
+ export enableDB=${{ steps.database-parameters.outputs.enableDB }}
+ export enablePswlessConnection=${{ steps.database-parameters.outputs.enablePswlessConnection }}
+ export dsConnectionURL='${{ steps.database-parameters.outputs.dsConnectionURL }}'
+ export dbUser='${{ steps.database-parameters.outputs.dbUser }}'
+ export dbIdentity='${{ steps.database-parameters.outputs.dbIdentity }}'
+ export dbPassword=${{ env.dbPassword }}
+ export wlsImageTag=${{ env.wlsImageTag }}
+
+ export gitUserName=${{ github.repository_owner }}
+ export testbranchName=${{ github.sha }}
+ export appPackageUrls=${cargoTrackerBlobUrl}
+
+ export location=${location}
+ export ocrSSOPSW=${ocrSSOPSW}
+ export ocrSSOUser=${ocrSSOUser}
+ export wdtRuntimePassword=${wdtRuntimePassword}
+ export wlsPassword=${wlsPassword}
+ export wlsUserName=${wlsUserName}
+ export vmSize=${{ needs.preflight.outputs.vmSize }}
+
+ echo "Generating parameter file..."
+ envsubst < "./weblogic-azure/weblogic-azure-aks/src/test/parameters-deploy-template.json" > "./weblogic-azure/weblogic-azure-aks/src/test/parameters-deploy-${{ github.job }}.json"
+
+ - name: Archive parameters-deploy.json
+ uses: actions/upload-artifact@v4.6.2
+ if: success()
+ with:
+ name: parameters-deploy.json
+ path: ./weblogic-azure/weblogic-azure-aks/src/test/parameters-deploy-${{ github.job }}.json
+
+ - name: Provision AKS Cluster as an existing cluster for deployment
+ if: ${{ env.createAKSCluster == 'false' }}
+ run: |
+ # the value of **createAKSCluster** is `false`, which means the offer won't create a new AKS cluster, but use an existing one.
+ # in order to simulate the same behavior as the offer, we need to create a new AKS cluster.
+
+ az feature register \
+ --namespace "Microsoft.ContainerService" \
+ --name "AppGatewayWithOverlayPreview"
+ az provider register --namespace Microsoft.ContainerService
+
+ az aks create \
+ -n ${{ env.aksClusterName }} \
+ -g ${{ env.resourceGroupForWlsAks }} \
+ --enable-managed-identity \
+ --network-plugin azure \
+ --load-balancer-sku standard \
+ --generate-ssh-keys
+
+ - name: Deploy WebLogic Server Cluster Domain offer
+ id: deploy-wls-cluster
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ artifactName=${{ needs.preflight.outputs.artifactName }}
+ az deployment group create \
+ --verbose \
+ --resource-group ${{ env.resourceGroupForWlsAks }} \
+ --name wls-on-aks \
+ --parameters @weblogic-azure/weblogic-azure-aks/src/test/parameters-deploy-${{ github.job }}.json \
+ --template-file ${artifactName}/mainTemplate.json
+
+
+ - name: Query Application Gateway URL
+ run: |
+ if ${{ env.enableAppGWIngress == 'false' }}; then
+ echo skipping current step as Application Gateway Ingress Controller is disabled
+ exit 0
+ fi
+
+ appgatewayname=$(az resource list --resource-group ${{ env.resourceGroupForWlsAks }} --query "[?type=='Microsoft.Network/applicationGateways'].name|[0]" -o tsv)
+ echo $appgatewayname
+ publicIpAddressId=$(az network application-gateway show --resource-group ${{ env.resourceGroupForWlsAks }} --name ${appgatewayname} --query frontendIPConfigurations[0].publicIPAddress.id -o tsv)
+ echo $publicIpAddressId
+ appGatewayURL=$(az network public-ip show --resource-group ${{ env.resourceGroupForWlsAks }} --ids ${publicIpAddressId} --query dnsSettings.fqdn -o tsv)
+ echo $appGatewayURL
+ echo "appGatewayURL=${appGatewayURL}" >> $GITHUB_ENV
+ - name: Verify Cargo Tracker is deployed as expected
+ run: |
+ if ${{ env.enableAppGWIngress == 'false' }}; then
+ echo skipping current step as Application Gateway Ingress Controller is disabled
+ exit 0
+ fi
+
+ echo "Verifying Cargo Tracker is deployed as expected"
+ curl --verbose http://${{ env.appGatewayURL }}/cargo-tracker/
+ response=$(curl --write-out '%{http_code}' --silent --output /dev/null http://${{ env.appGatewayURL }}/cargo-tracker/)
+ echo "$response"
+ if [ "$response" -ne 200 ]; then
+ echo "Cargo Tracker is not accessible"
+ exit 1
+ else
+ echo "Cargo Tracker is accessible"
+ fi
+ - name: Install kubectl
+ run: |
+ curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+ curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
+ echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check && sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+ kubectl version --client
+ - name: Connect to AKS cluster
+ run: |
+ echo "connect to cluster"
+ aksClusterName=$(az resource list --resource-group ${{ env.resourceGroupForWlsAks }} --query "[?type=='Microsoft.ContainerService/managedClusters'].name|[0]" -o tsv)
+ az aks get-credentials --resource-group ${{ env.resourceGroupForWlsAks }} --name $aksClusterName
+ - name: Generate&Apply configmap
+ run: |
+ wlsDomainNS=sample-domain1-ns
+ wlsConfigmapName=sample-domain1-wdt-config-map
+ wlsConfigmapJson=cargo-tracker-db.json
+ modelFilePath=models
+ mkdir ${modelFilePath}
+ echo "create configmap"
+ echo "export exisiting configmap"
+ kubectl -n ${wlsDomainNS} get configmap ${wlsConfigmapName} -o json >${wlsConfigmapJson}
+
+ echo "query model keys"
+ keyList=$(cat ${wlsConfigmapJson} | jq '.data | keys[]' | tr -d "\"")
+ for item in $keyList; do
+ data=$(cat ${wlsConfigmapJson} | jq ".data[\"${item}\"]")
+ data=$(echo "${data:1:${#data}-2}")
+ echo -e "${data}" >${modelFilePath}/${item}
+ done
+
+ # remove current configmap and create a new one
+ kubectl -n ${wlsDomainNS} delete configmap ${wlsConfigmapName}
+
+ cp cargotracker/src/test/aks/cargo-tracker-jms.yaml ${modelFilePath}/cargo-tracker-jms.yaml
+
+ kubectl -n ${wlsDomainNS} create configmap ${wlsConfigmapName} \
+ --from-file=${modelFilePath}
+
+ kubectl -n ${wlsDomainNS} label configmap ${wlsConfigmapName} \
+ weblogic.domainUID=sample-domain1
+ restartVersion=$(kubectl -n ${wlsDomainNS} get domain sample-domain1 '-o=jsonpath={.spec.restartVersion}')
+ # increase restart version
+ restartVersion=$((restartVersion + 1))
+ # record timestamp before apply changes
+ timestampBeforePatchingDomain=$(date +%s)
+ # get the replica number
+ clusterName=$(kubectl get cluster -n ${wlsDomainNS} -o json | jq -r '.items[0].metadata.name')
+ replicas=$(kubectl -n ${wlsDomainNS} get cluster ${clusterName} -o json | jq '. | .spec.replicas')
+ echo "append configmap and update restart version"
+ kubectl -n ${wlsDomainNS} patch domain sample-domain1 \
+ --type=json \
+ '-p=[{"op": "replace", "path": "/spec/restartVersion", "value": "'${restartVersion}'" }, {"op": "add", "path": "/spec/configuration/model/configMap", "value": "'${wlsConfigmapName}'" }]'
+ echo "timestampBeforePatchingDomain=${timestampBeforePatchingDomain}" >> $GITHUB_ENV
+ echo "replicas=${replicas}" >> $GITHUB_ENV
+ - name: Verify pods are restarted
+ run: |
+ # interval of checking pod status.
+ checkPodStatusInterval=20
+ # max attempt to check pod status.
+ checkPodStatusMaxAttempts=30
+ # domain and namespaces
+ wlsDomainUID="sample-domain1"
+ wlsDomainNS=${wlsDomainUID}-ns
+
+ updatedPodNum=0
+ attempt=0
+
+ echo $timestampBeforePatchingDomain $replicas $wlsDomainUID $checkPodStatusMaxAttempts $checkPodStatusInterval
+
+ while [[ ${updatedPodNum} -lt ${replicas} ]] && [[ $attempt -le ${checkPodStatusMaxAttempts} ]]; do
+ echo "attempts ${attempt}"
+ ret=$(kubectl get pods -n ${wlsDomainNS} -l weblogic.domainUID=${wlsDomainUID} -o json | jq '.items[] | .metadata.creationTimestamp' | tr -d "\"")
+
+ counter=0
+ for item in $ret; do
+ podCreateTimeStamp=$(date -u -d "${item}" +"%s")
+ echo "pod create time: $podCreateTimeStamp, base time: ${timestampBeforePatchingDomain}"
+ if [[ ${podCreateTimeStamp} -gt ${timestampBeforePatchingDomain} ]]; then
+ counter=$((counter + 1))
+ fi
+ done
+
+ updatedPodNum=$counter
+ echo "Number of new pod: ${updatedPodNum}"
+
+ attempt=$((attempt + 1))
+ sleep ${checkPodStatusInterval}
+ done
+
+ if [[ ${attempt} -gt ${checkPodStatusMaxAttempts} ]]; then
+ echo "Failed to restart all weblogic server pods. "
+ exit 1
+ fi
+ cleanup:
+ needs: [deploy-wls-on-aks, preflight]
+ if: ${{ always() && needs.preflight.outputs.isForDemo == 'false' }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Get AZ CLI Version
+ run: |
+ echo "azCliVersion=${{needs.preflight.outputs.azCliVersion}}" >> $GITHUB_ENV
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Delete Storage Account Resource Group
+ id: delete-sa-resource-group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "delete... " ${{ env.resourceGroupForStorageAccount }}
+ az group delete --yes --no-wait --verbose --name ${{ env.resourceGroupForStorageAccount }}
+ - name: Delete DB Resource Group
+ id: delete-db-resource-group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "delete... " ${{ env.resourceGroupForDB }}
+ az group delete --yes --no-wait --verbose --name ${{ env.resourceGroupForDB }}
+ - name: Delete AKS Resource Group
+ id: delete-aks-resource-group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "delete... " ${{ env.resourceGroupForWlsAks }}
+ az group delete --yes --no-wait --verbose --name ${{ env.resourceGroupForWlsAks }}
diff --git a/.github/workflows/testWlsAksWithoutDependencyCreation.yml b/.github/workflows/testWlsAksWithoutDependencyCreation.yml
new file mode 100644
index 000000000..5c9df73dc
--- /dev/null
+++ b/.github/workflows/testWlsAksWithoutDependencyCreation.yml
@@ -0,0 +1,400 @@
+name: Test WLS on AKS without dependency creation
+
+on:
+ workflow_dispatch:
+ inputs:
+ gitUserNameForArtifactsLocation:
+ description: 'Replaced into https://raw.githubusercontent.com/#gitUserName#/weblogic-azure/#testbranchName#/weblogic-azure-aks/src/main/arm/'
+ required: true
+ default: oracle
+ testBranchNameForArtifactsLocation:
+ description: 'Replaced into https://raw.githubusercontent.com/#gitUserName#/weblogic-azure/#testbranchName#/weblogic-azure-aks/src/main/arm/'
+ required: true
+ default: main
+ isForDemo:
+ description: 'If set to true, resources will not be deleted'
+ required: true
+ default: 'false'
+ storageAccountName:
+ description: 'Specify storage account of uploading .war file'
+ required: true
+ storageContainerName:
+ description: 'Specify name of storage container within account'
+ required: true
+ dbName:
+ description: 'Name of the database. Get from another pipeline run'
+ required: true
+ vmSize:
+ description: 'The VM size for the AKS pool'
+ required: true
+ default: Standard_D2s_v3
+ # sample cURL
+ # curl --verbose -X POST https://api.github.com/repos/<REPO_OWNER>/weblogic-azure/dispatches -H 'Accept: application/vnd.github.everest-preview+json' -H 'Authorization: token <PERSONAL_ACCESS_TOKEN>' --data '<CLIENT_PAYLOAD_JSON>'
+ # sample
+ # {"event_type": "aks-integration-test-without-dependency-creation", "client_payload": {"gitUserNameForArtifactsLocation": "", "testBranchNameForArtifactsLocation": "", "isForDemo": "false", "storageAccountName": "wlsdsa13971210545", "storageContainerName": "wlsdcon13971210545", "dbName": "wlsdb13971210545"}}
+ # the request data can be obtained from the setupWlsAksDependency pipeline: open the run's summary page and download the generated artifact named 'integration-test-data'
+ repository_dispatch:
+ types: [aks-integration-test-without-dependency-creation,integration-test-all]
+
+env:
+ azureCredentials: ${{ secrets.AZURE_CREDENTIALS }}
+ location: eastus
+ dbAdminUser: weblogic
+ dbPassword: ${{ secrets.DB_PASSWORD }}
+ dbServerName: weblogicdb
+ ocrSSOPSW: ${{ secrets.ORC_SSOPSW }}
+ ocrSSOUser: ${{ secrets.ORC_SSOUSER }}
+ wdtRuntimePassword: ${{ secrets.WDT_RUNTIMEPSW}}
+ wlsUserName: ${{ secrets.WLS_USERNAME }}
+ wlsPassword: ${{ secrets.WLS_PSW }}
+
+jobs:
+ preflight:
+ runs-on: ubuntu-latest
+ outputs:
+ artifactName: ${{steps.artifact_file.outputs.artifactName}}
+ resourceGroupForWlsAks: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.resourceGroupForWlsAks }}
+ dbName: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.dbName }}
+ storageAccountName: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.storageAccountName }}
+ storageContainerName: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.storageContainerName }}
+ isForDemo: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.isForDemo }}
+ gitUserNameForArtifactsLocation: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.gitUserNameForArtifactsLocation }}
+ testBranchNameForArtifactsLocation: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.testBranchNameForArtifactsLocation }}
+ azCliVersion: ${{steps.set-az-cli-version.outputs.azCliVersion}}
+ vmSize: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.vmSize }}
+ steps:
+ - uses: actions/checkout@v2.3.4
+ - name: Set AZ CLI Version
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - name: Output Az CLi version
+ id: set-az-cli-version
+ run: |
+ echo "azCliVersion=${azCliVersion}" >> $GITHUB_OUTPUT
+ - name: Get versions of external dependencies
+ id: get-external-dependencies-version
+ run: |
+ curl -Lo external-deps-versions.properties https://raw.githubusercontent.com/Azure/azure-javaee-iaas/main/external-deps-versions.properties
+ source external-deps-versions.properties
+ echo "bicepVersion=${BICEP_VERSION}" >> $GITHUB_ENV
+ echo "refArmttk=${ARM_TTK_REFERENCE}" >> $GITHUB_ENV
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ resourceGroupForWlsAks=wlsd-aks-${{ github.repository_owner }}-${{ github.run_id }}-${{ github.run_number }}
+ dbName=${{ github.event.inputs.dbName }}
+ storageAccountName=${{ github.event.inputs.storageAccountName }}
+ storageContainerName=${{ github.event.inputs.storageContainerName }}
+ isForDemo=${{ github.event.inputs.isForDemo }}
+ gitUserNameForArtifactsLocation=${{ github.event.inputs.gitUserNameForArtifactsLocation }}
+ testBranchNameForArtifactsLocation=${{ github.event.inputs.testBranchNameForArtifactsLocation }}
+ vmSize=${{ github.event.inputs.vmSize }}
+ else
+ resourceGroupForWlsAks=wlsd-aks-${{ github.repository_owner }}-${{ github.run_id }}-${{ github.run_number }}
+ dbName=${{ github.event.client_payload.dbName }}
+ storageAccountName=${{ github.event.client_payload.storageAccountName }}
+ storageContainerName=${{ github.event.client_payload.storageContainerName }}
+ isForDemo=${{ github.event.client_payload.isForDemo }}
+ gitUserNameForArtifactsLocation=${{ github.event.client_payload.gitUserNameForArtifactsLocation }}
+ testBranchNameForArtifactsLocation=${{ github.event.client_payload.testBranchNameForArtifactsLocation }}
+ vmSize=${{ github.event.client_payload.vmSize }}
+ fi
+
+ echo "resourceGroupForWlsAks=${resourceGroupForWlsAks}" >> "$GITHUB_OUTPUT"
+ echo "dbName=${dbName}" >> "$GITHUB_OUTPUT"
+ echo "storageAccountName=${storageAccountName}" >> "$GITHUB_OUTPUT"
+ echo "storageContainerName=${storageContainerName}" >> "$GITHUB_OUTPUT"
+ echo "isForDemo=${isForDemo}" >> "$GITHUB_OUTPUT"
+ echo "gitUserNameForArtifactsLocation=${gitUserNameForArtifactsLocation}" >> "$GITHUB_OUTPUT"
+ echo "testBranchNameForArtifactsLocation=${testBranchNameForArtifactsLocation}" >> "$GITHUB_OUTPUT"
+ echo "vmSize=${vmSize}" >> "$GITHUB_OUTPUT"
+
+ echo "resourceGroupForWlsAks=${resourceGroupForWlsAks}" >> $GITHUB_ENV
+ echo "dbName=${dbName}" >> $GITHUB_ENV
+ echo "storageAccountName=${storageAccountName}" >> $GITHUB_ENV
+ echo "storageContainerName=${storageContainerName}" >> $GITHUB_ENV
+ echo "isForDemo=${isForDemo}" >> $GITHUB_ENV
+ echo "gitUserNameForArtifactsLocation=${gitUserNameForArtifactsLocation}" >> $GITHUB_ENV
+ echo "testBranchNameForArtifactsLocation=${testBranchNameForArtifactsLocation}" >> $GITHUB_ENV
+ echo "vmSize=${vmSize}" >> $GITHUB_ENV
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - name: Set up bicep
+ run: |
+ curl -Lo bicep https://github.com/Azure/bicep/releases/download/${bicepVersion}/bicep-linux-x64
+ chmod +x ./bicep
+ sudo mv ./bicep /usr/local/bin/bicep
+ bicep --version
+ - name: Checkout arm-ttk
+ uses: actions/checkout@v2
+ with:
+ repository: Azure/arm-ttk
+ path: arm-ttk
+ ref: ${{ env.refArmttk }}
+ - name: Checkout weblogic-azure
+ uses: actions/checkout@v2
+ with:
+ path: weblogic-azure
+ - name: Build and test weblogic-azure/weblogic-azure-aks
+ run: mvn -Pbicep -Passembly clean install -Ptemplate-validation-tests --file weblogic-azure/weblogic-azure-aks/pom.xml
+ - name: Generate artifact file name and path
+ id: artifact_file
+ run: |
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${version.wls-on-aks-azure-marketplace}' --file weblogic-azure/pom.xml --non-recursive exec:exec)
+ artifactName=wls-on-aks-azure-marketplace-$version-arm-assembly
+ unzip weblogic-azure/weblogic-azure-aks/target/$artifactName.zip -d weblogic-azure/weblogic-azure-aks/target/$artifactName
+ echo "artifactName=${artifactName}" >> "$GITHUB_OUTPUT"
+ echo "artifactPath=weblogic-azure/weblogic-azure-aks/target/$artifactName" >> "$GITHUB_OUTPUT"
+ - name: Archive weblogic-azure/weblogic-azure-aks template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_file.outputs.artifactPath}}
+ - name: Checkout Azure-Samples/cargotracker-wls-aks
+ uses: actions/checkout@v2
+ with:
+ repository: Azure-Samples/cargotracker-wls-aks
+ path: cargotracker
+ - name: Maven build web app
+ run: |
+ mvn clean install -PweblogicOnAks --file cargotracker/pom.xml
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ env.azureCredentials }}
+ - name: Upload built web app war file
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ az storage blob upload \
+ --account-name ${{ env.storageAccountName }} \
+ --container-name ${{ env.storageContainerName }} \
+ --file cargotracker/target/cargo-tracker.war \
+ --name cargo-tracker.war \
+ --overwrite
+ deploy-wls-on-aks:
+ needs: preflight
+ runs-on: ubuntu-latest
+ steps:
+ - name: Get AZ CLI Version
+ run: |
+ echo "azCliVersion=${{needs.preflight.outputs.azCliVersion}}" >> $GITHUB_ENV
+ - name: Checkout weblogic-azure
+ uses: actions/checkout@v2
+ with:
+ path: weblogic-azure
+ - name: Download artifact for deployment
+ uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
+ with:
+ name: ${{needs.preflight.outputs.artifactName}}
+ path: ${{needs.preflight.outputs.artifactName}}
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ env.azureCredentials }}
+ - name: Query web app blob url and set to env
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ sasTokenValidTime=3600
+ expiryData=$(( `date +%s`+${sasTokenValidTime}))
+ sasEnd=`date -d@"$expiryData" -u '+%Y-%m-%dT%H:%MZ'`
+ sasToken=$(az storage account generate-sas \
+ --permissions r \
+ --account-name ${{ needs.preflight.outputs.storageAccountName }} \
+ --services b \
+ --resource-types sco \
+ --expiry $sasEnd -o tsv)
+ cargoTrackerBlobUrl=$(az storage blob url \
+ --container-name ${{ needs.preflight.outputs.storageContainerName }} \
+ --name cargo-tracker.war \
+ --account-name ${{ needs.preflight.outputs.storageAccountName }} \
+ --sas-token ${sasToken} -o tsv)
+
+ echo "cargoTrackerBlobUrl=${cargoTrackerBlobUrl}" >> $GITHUB_ENV
+ - name: Create Resource Group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "create resource group" ${{ needs.preflight.outputs.resourceGroupForWlsAks }}
+ az group create --verbose --name ${{ needs.preflight.outputs.resourceGroupForWlsAks }} --location ${{ env.location }}
+ - name: Checkout Azure-Samples/cargotracker-wls-aks
+ uses: actions/checkout@v2
+ with:
+ repository: Azure-Samples/cargotracker-wls-aks
+ path: cargotracker
+ - name: Prepare parameter file
+ run: |
+ echo "generate parameter file"
+ bash weblogic-azure/weblogic-azure-aks/src/test/genWlsAksParameters.sh \
+ ${{ needs.preflight.outputs.gitUserNameForArtifactsLocation }} \
+ ${{ needs.preflight.outputs.testBranchNameForArtifactsLocation }} \
+ "${cargoTrackerBlobUrl}" \
+ ${dbPassword} \
+ ${dbAdminUser} \
+ jdbc:postgresql:\/\/${{ needs.preflight.outputs.dbName }}.postgres.database.azure.com:5432\/${{ env.dbServerName }} \
+ ${location} \
+ ${ocrSSOPSW} \
+ ${ocrSSOUser} \
+ ${wdtRuntimePassword} \
+ ${wlsPassword} \
+ ${wlsUserName} \
+ ${{ needs.preflight.outputs.vmSize }} \
+ weblogic-azure/weblogic-azure-aks/src/test/setupWlsAksParameters.jsonc
+ - name: Deploy WebLogic Server Cluster Domain offer
+ id: deploy-wls-cluster
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ artifactName=${{ needs.preflight.outputs.artifactName }}
+
+ az deployment group create \
+ --verbose \
+ --resource-group ${{ needs.preflight.outputs.resourceGroupForWlsAks }} \
+ --name wls-on-aks \
+ --parameters @weblogic-azure/weblogic-azure-aks/src/test/setupWlsAksParameters.jsonc \
+ --template-file ${artifactName}/mainTemplate.json
+ - name: Query Application Gateway URL
+ run: |
+ appgatewayname=$(az resource list --resource-group ${{ needs.preflight.outputs.resourceGroupForWlsAks }} --query "[?type=='Microsoft.Network/applicationGateways'].name|[0]" -o tsv)
+ echo $appgatewayname
+ publicIpAddressId=$(az network application-gateway show --resource-group ${{ needs.preflight.outputs.resourceGroupForWlsAks }} --name ${appgatewayname} --query frontendIPConfigurations[0].publicIPAddress.id -o tsv)
+ echo $publicIpAddressId
+ appGatewayURL=$(az network public-ip show --resource-group ${{ needs.preflight.outputs.resourceGroupForWlsAks }} --ids ${publicIpAddressId} --query dnsSettings.fqdn -o tsv)
+ echo $appGatewayURL
+ echo "appGatewayURL=${appGatewayURL}" >> $GITHUB_ENV
+ - name: Verify Cargo Tracker is deployed as expected
+ run: |
+ echo "Verifying Cargo Tracker is deployed as expected"
+ curl --verbose http://${{ env.appGatewayURL }}/cargo-tracker/
+ response=$(curl --write-out '%{http_code}' --silent --output /dev/null http://${{ env.appGatewayURL }}/cargo-tracker/)
+ echo "$response"
+ if [ "$response" -ne 200 ]; then
+ echo "Cargo Tracker is not accessible"
+ exit 1
+ else
+ echo "Cargo Tracker is accessible"
+ fi
+ - name: Install kubectl
+ run: |
+ curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+ curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
+ echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check && sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+ kubectl version --client
+ - name: Connect to AKS cluster
+ run: |
+ echo "connect to cluster"
+ aksClusterName=$(az resource list --resource-group ${{ needs.preflight.outputs.resourceGroupForWlsAks }} --query "[?type=='Microsoft.ContainerService/managedClusters'].name|[0]" -o tsv)
+ az aks get-credentials --resource-group ${{ needs.preflight.outputs.resourceGroupForWlsAks }} --name $aksClusterName
+ - name: Generate&Apply configmap
+ run: |
+ wlsDomainNS=sample-domain1-ns
+ wlsConfigmapName=sample-domain1-wdt-config-map
+ wlsConfigmapJson=cargo-tracker-db.json
+ modelFilePath=models
+ mkdir ${modelFilePath}
+ echo "create configmap"
+ echo "export exisiting configmap"
+ kubectl -n ${wlsDomainNS} get configmap ${wlsConfigmapName} -o json >${wlsConfigmapJson}
+
+ echo "query model keys"
+ keyList=$(cat ${wlsConfigmapJson} | jq '.data | keys[]' | tr -d "\"")
+ for item in $keyList; do
+ data=$(cat ${wlsConfigmapJson} | jq ".data[\"${item}\"]")
+ data=$(echo "${data:1:${#data}-2}")
+ echo -e "${data}" >${modelFilePath}/${item}
+ done
+
+ # remove current configmap and create a new one
+ kubectl -n ${wlsDomainNS} delete configmap ${wlsConfigmapName}
+
+ cp cargotracker/src/test/aks/cargo-tracker-jms.yaml ${modelFilePath}/cargo-tracker-jms.yaml
+
+ kubectl -n ${wlsDomainNS} create configmap ${wlsConfigmapName} \
+ --from-file=${modelFilePath}
+
+ kubectl -n ${wlsDomainNS} label configmap ${wlsConfigmapName} \
+ weblogic.domainUID=sample-domain1
+ restartVersion=$(kubectl -n ${wlsDomainNS} get domain sample-domain1 '-o=jsonpath={.spec.restartVersion}')
+ # increase restart version
+ restartVersion=$((restartVersion + 1))
+ # record timestamp before apply changes
+ timestampBeforePatchingDomain=$(date +%s)
+ # get the replica number
+ clusterName=$(kubectl get cluster -n ${wlsDomainNS} -o json | jq -r '.items[0].metadata.name')
+ replicas=$(kubectl -n ${wlsDomainNS} get cluster ${clusterName} -o json | jq '. | .spec.replicas')
+ echo "append configmap and update restart version"
+ kubectl -n ${wlsDomainNS} patch domain sample-domain1 \
+ --type=json \
+ '-p=[{"op": "replace", "path": "/spec/restartVersion", "value": "'${restartVersion}'" }, {"op": "add", "path": "/spec/configuration/model/configMap", "value": "'${wlsConfigmapName}'" }]'
+ echo "timestampBeforePatchingDomain=${timestampBeforePatchingDomain}" >> $GITHUB_ENV
+ echo "replicas=${replicas}" >> $GITHUB_ENV
+ - name: Verify pods are restarted
+ run: |
+ # interval of checking pod status.
+ checkPodStatusInterval=20
+ # max attempt to check pod status.
+ checkPodStatusMaxAttempts=30
+ # domain and namespaces
+ wlsDomainUID="sample-domain1"
+ wlsDomainNS=${wlsDomainUID}-ns
+
+ updatedPodNum=0
+ attempt=0
+
+ echo $timestampBeforePatchingDomain $replicas $wlsDomainUID $checkPodStatusMaxAttempts $checkPodStatusInterval
+
+ while [[ ${updatedPodNum} -lt ${replicas} ]] && [[ $attempt -le ${checkPodStatusMaxAttempts} ]]; do
+ echo "attempts ${attempt}"
+ ret=$(kubectl get pods -n ${wlsDomainNS} -l weblogic.domainUID=${wlsDomainUID} -o json | jq '.items[] | .metadata.creationTimestamp' | tr -d "\"")
+
+ counter=0
+ for item in $ret; do
+ podCreateTimeStamp=$(date -u -d "${item}" +"%s")
+ echo "pod create time: $podCreateTimeStamp, base time: ${timestampBeforePatchingDomain}"
+ if [[ ${podCreateTimeStamp} -gt ${timestampBeforePatchingDomain} ]]; then
+ counter=$((counter + 1))
+ fi
+ done
+
+ updatedPodNum=$counter
+ echo "Number of new pod: ${updatedPodNum}"
+
+ attempt=$((attempt + 1))
+ sleep ${checkPodStatusInterval}
+ done
+
+ if [[ ${attempt} -gt ${checkPodStatusMaxAttempts} ]]; then
+ echo "Failed to restart all weblogic server pods. "
+ exit 1
+ fi
+ cleanup:
+ needs: [deploy-wls-on-aks, preflight]
+ if: ${{ always() && needs.preflight.outputs.isForDemo == 'false' }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Get AZ CLI Version
+ run: |
+ echo "azCliVersion=${{needs.preflight.outputs.azCliVersion}}" >> $GITHUB_ENV
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ env.azureCredentials }}
+ - name: Delete AKS Resource Group
+ id: delete-aks-resource-group
+ uses: azure/CLI@v1
+ with:
+ azcliversion: ${{ env.azCliVersion }}
+ inlineScript: |
+ echo "delete... " ${{ needs.preflight.outputs.resourceGroupForWlsAks }}
+ az group delete --yes --no-wait --verbose --name ${{ needs.preflight.outputs.resourceGroupForWlsAks }}
diff --git a/.github/workflows/testWlsVmAdmin.yml b/.github/workflows/testWlsVmAdmin.yml
new file mode 100644
index 000000000..ebd99dbba
--- /dev/null
+++ b/.github/workflows/testWlsVmAdmin.yml
@@ -0,0 +1,486 @@
+# Copyright (c) 2021, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+name: Test Admin Server on VM
+run-name: Test Admin Server on VM with `db`:${{ inputs.databaseType }}
+
+on:
+ workflow_dispatch:
+ inputs:
+ location:
+ description: 'The location for the resources'
+ required: true
+ default: centralus
+ databaseType:
+ description: 'Database connection'
+ required: true
+ default: 'mssqlserver'
+ type: choice
+ options:
+ - none
+ - mssqlserver
+ - mssqlserver-passwordless
+ - postgresql(flexible)
+ - postgresql-passwordless(flexible)
+ configurations_for_it:
+ description: "JSON string of environment variables used for IT"
+ required: false
+ default: '{}'
+
+ # Allows you to run this workflow using GitHub APIs
+ # PERSONAL_ACCESS_TOKEN=
+ # REPO_NAME=mriccell/weblogic-azure
+ # curl --verbose -XPOST -u "mriccell:${PERSONAL_ACCESS_TOKEN}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/${REPO_NAME}/dispatches --data '{"event_type": "test-vm-admin", "client_payload": {"ref": "refs/heads/main"}}'
+ repository_dispatch:
+ types: [test-vm-admin,integration-test-all]
+
+env:
+ adminConsolePort: 7005
+ adminVMName: adminServerVM
+ adminPassword: ${{ secrets.WLS_PSW }}
+ dbAdminUser: weblogic
+ dbName: wlsdb${{ github.run_id }}${{ github.run_number }}
+ dbServerName: weblogicdb-${{ github.run_id }}-${{ github.run_number }}
+ uamiName: uami${{ github.run_id }}${{ github.run_number }}
+ gitToken: ${{ secrets.GIT_TOKEN }}
+ offerName: arm-oraclelinux-wls-admin
+ offerPath: weblogic-azure/weblogic-azure-vm/arm-oraclelinux-wls-admin
+ testbranchName: cicd-${{ github.run_id }}-${{ github.run_number }}
+ repoName: weblogic-azure
+ repoOwner: ${{ github.repository_owner }}
+ resourceGroupPrefix: wls-${{ github.repository_owner }}-${{ github.run_id }}-${{ github.run_number }}
+ resourceGroupForDependency: wlsd-${{ github.repository_owner }}-${{ github.run_id }}-${{ github.run_number }}
+ userEmail: ${{ secrets.USER_EMAIL }}
+ userName: ${{ secrets.USER_NAME }}
+ wlsPassword: ${{ secrets.WLS_PSW }}
+ wlsDomainName: adminDomain
+ wlsUserName: weblogic
+ ref: ${{ github.ref_name }}
+ images: |
+ owls-141100-jdk11-ol91;Oracle:weblogic-141100-jdk11-ol91:owls-141100-jdk11-ol91;latest
+ owls-141100-jdk11-ol87;Oracle:weblogic-141100-jdk11-ol87:owls-141100-jdk11-ol87;latest
+ owls-141100-jdk8-ol91;Oracle:weblogic-141100-jdk8-ol91:owls-141100-jdk8-ol91;latest
+ owls-141100-jdk8-ol87;Oracle:weblogic-141100-jdk8-ol87:owls-141100-jdk8-ol87;latest
+ owls-122140-jdk8-ol91;Oracle:weblogic-122140-jdk8-ol91:owls-122140-jdk8-ol91;latest
+ owls-122140-jdk8-ol87;Oracle:weblogic-122140-jdk8-ol87:owls-122140-jdk8-ol87;latest
+ owls-122140-jdk8-ol76;Oracle:weblogic-122140-jdk8-ol76:owls-122140-jdk8-ol7;latest
+ owls-141100-jdk8-ol76;Oracle:weblogic-141100-jdk8-ol76:owls-141100-jdk8-ol7;latest
+ owls-141100-jdk11-ol76;Oracle:weblogic-141100-jdk11-ol76:owls-141100-jdk11-ol7;latest
+ owls-122140-jdk8-rhel76;Oracle:weblogic-122140-jdk8-rhel76:owls-122140-jdk8-rhel76;latest
+ owls-141100-jdk8-rhel76;Oracle:weblogic-141100-jdk8-rhel76:owls-141100-jdk8-rhel76;latest
+ owls-141100-jdk11-rhel76;Oracle:weblogic-141100-jdk11-rhel76:owls-141100-jdk11-rhel76;latest
+
+jobs:
+ preflight:
+ outputs:
+ location: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.location }}
+ artifactName: ${{steps.artifact_file.outputs.artifactName}}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ location=eastus # default value
+
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ location=${{ github.event.inputs.location }}
+ else
+ location=${{ github.event.client_payload.location || 'eastus' }}
+ fi
+
+ echo "##[set-output name=location;]${location}"
+ echo "location=${location}" >> $GITHUB_ENV
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: actions/checkout@v2.3.4
+ - name: Set dependency reference
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - name: Checkout arm-ttk
+ uses: actions/checkout@v2
+ with:
+ repository: Azure/arm-ttk
+ path: arm-ttk
+ ref: ${{ env.refArmTtk }}
+ - name: Checkout ${{env.repoOwner}}/${{env.repoName}}
+ uses: actions/checkout@v2
+ with:
+ repository: ${{env.repoOwner}}/${{env.repoName}}
+ path: ${{env.repoName}}
+ - name: Build and test ${{ env.offerName }}
+ run: |
+ ls
+ mvn -Ptemplate-validation-tests clean install -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }} --file ${offerPath}/pom.xml
+
+ - name: Checkout ${{env.repoOwner}}/${{env.repoName}} for test
+ uses: actions/checkout@v2
+ with:
+ repository: ${{env.repoOwner}}/${{env.repoName}}
+ path: ${{env.repoName}}-dev
+ - name: Create a new branch with development pids in nestedtemplates
+ run: |
+ current=`pwd`
+ echo "current=${current}" >> $GITHUB_ENV
+ offerDevPath=${{ env.repoName }}-dev/weblogic-azure-vm/${{env.offerName}}
+ cd ${offerDevPath}/src/main/arm/nestedtemplates
+ git config --global core.longpaths true
+ git config --global user.email $userEmail
+ git config --global user.name $userName
+ echo "create branch $testbranchName"
+ git checkout -b $testbranchName
+ rm -r -f $current/${offerDevPath}/src/main/arm/nestedtemplates/*
+ cp -r -f $current/${{ env.offerPath }}/target/arm/nestedtemplates/* $current/${offerDevPath}/src/main/arm/nestedtemplates/
+ git status
+ git commit -a -m "hard code pids"
+ git push https://$gitToken@github.com/${GITHUB_REPOSITORY}.git -f
+
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Validate deployment templates for different combinations of service integration
+ id: validate-deployment-templates
+ run: |
+ bash ${{ env.offerPath }}/test/scripts/verify-deployments.sh \
+ <<< "${{ github.run_id }}${{ github.run_number }} ${location} \
+ ${{ env.offerPath }}/target/arm/mainTemplate.json \
+ ${GITHUB_REPOSITORY} ${testbranchName} ${{ env.offerPath }}/test/scripts"
+
+ - name: Get version information from pom.xml
+ id: version
+ run: |
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${version.${{ env.offerName }}}' --file weblogic-azure/pom.xml --non-recursive exec:exec)
+ echo "version=${version}" >> $GITHUB_ENV
+ - name: Print version
+ run: echo $version
+ - name: Generate artifact name
+ run: echo "artifactName=${{ env.offerName }}-$version-arm-assembly" >> $GITHUB_ENV
+ - name: Print artifact name
+ run: echo $artifactName
+ - name: Output artifact name
+ id: artifact_file
+ run: echo "##[set-output name=artifactName;]${{ env.offerName }}-$version-arm-assembly"
+ - name: Generate zip package path
+ id: artifact_path
+ run: echo "##[set-output name=artifactPath;]${{ env.offerPath }}/target/$artifactName"
+ - name: Output artifact path
+ run: echo $artifactPath
+ env:
+ artifactPath: ${{steps.artifact_path.outputs.artifactPath}}
+ - name: Unzip the package as upload action will zip again
+ run: unzip ${{ env.offerPath }}/target/$artifactName.zip -d ${{ env.offerPath }}/target/$artifactName
+
+ - name: Archive ${{ env.offerName }} template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_path.outputs.artifactPath}}
+
+ deploy-dependencies:
+ needs: preflight
+ runs-on: ubuntu-latest
+ steps:
+ - name: Initialize environment variables
+ run: |
+ echo "location=${{needs.preflight.outputs.location}}" >> $GITHUB_ENV
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Create Resource Group
+ id: create-resource-group
+ run: |
+ echo "create resource group" ${{ env.resourceGroupForDependency }}
+ az group create --verbose --name ${{ env.resourceGroupForDependency }} --location ${location}
+ - uses: actions/checkout@v2.3.4
+
+ deploy-weblogic-admin:
+ needs: [deploy-dependencies, preflight]
+ runs-on: ubuntu-latest
+ env: ${{ fromJson(inputs.configurations_for_it) }}
+ steps:
+ - name: Initialize environment variables
+ run: |
+ echo "location=${{needs.preflight.outputs.location}}" >> $GITHUB_ENV
+ echo "artifactName=${{ needs.preflight.outputs.artifactName }}" >> $GITHUB_ENV
+
+ - name: Checkout weblogic-azure repository
+ uses: actions/checkout@v4
+ with:
+ path: ${{env.repoName}}
+
+ - name: Download artifact for deployment
+ uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
+ with:
+ name: ${{ needs.preflight.outputs.artifactName }}
+ path: ${{needs.preflight.outputs.artifactName}}
+
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+ - name: Provision database
+ id: database-provision
+ uses: ./weblogic-azure/.github/actions/database-provision
+ with:
+ databaseType: ${{ inputs.databaseType }}
+ resourceGroup: ${{ env.resourceGroupForDependency }}
+ uamiName: ${{ env.uamiName }}
+ location: ${{ env.location }}
+ dbInstanceName: ${{ env.dbServerName }}
+ dbPassword: ${{ env.wlsPassword }}
+ databaseName: ${{ env.dbName }}
+ dbAdminUser: ${{ env.dbAdminUser }}
+
+ - name: Get database parameters
+ id: database-parameters
+ uses: ./weblogic-azure/.github/actions/database-parameters
+ with:
+ databaseType: ${{ inputs.databaseType }}
+ uamiId: ${{ steps.database-provision.outputs.uamiId }}
+ serverHost: ${{ steps.database-provision.outputs.serverHost }}
+ dbInstanceName: ${{ env.dbServerName }}
+ databaseName: ${{ env.dbName }}
+ dbAdminUser: ${{ env.dbAdminUser }}
+
+ - name: Provision Azure Vnet
+ id: vnet-provision
+ if: ${{ env.virtualNetworkNewOrExisting == 'existing' }}
+ run: |
+ echo "Provisioning Azure Vnet with subnet"
+ az network vnet create \
+ --resource-group ${{ env.resourceGroupForDependency }} \
+ --name ${{ env.virtualNetworkName }} \
+ --address-prefix 10.0.0.0/28 \
+ --subnet-name ${{ env.subnetName }} \
+ --subnet-prefix 10.0.0.0/29
+
+ - name: Try each image until one succeeds
+ run: |
+ # Convert multiline string to array
+ IFS=$'\n' read -d '' -r -a image_array <<< "${{ env.images }}" || true
+
+ success=false
+
+ for image in "${image_array[@]}"; do
+ if [ -z "$image" ]; then
+ continue
+ fi
+
+ echo "::group::Trying image: $image"
+
+ # Set deployment variables for this image
+ imageUrn="$image"
+ sku=${imageUrn%%;*}
+ resourceGroup=$(echo "${resourceGroupPrefix}-${sku}" | sed "s/_//g")
+
+ echo "Deploying with SKU: $sku"
+ echo "Resource Group: $resourceGroup"
+
+ # Export db variables for envsubst
+ export databaseType='${{ steps.database-parameters.outputs.databaseType }}'
+ export enableDB=${{ steps.database-parameters.outputs.enableDB }}
+ export enablePswlessConnection=${{ steps.database-parameters.outputs.enablePswlessConnection }}
+ export dsConnectionURL='${{ steps.database-parameters.outputs.dsConnectionURL }}'
+ export dbUser='${{ steps.database-parameters.outputs.dbUser }}'
+ export dbIdentity='${{ steps.database-parameters.outputs.dbIdentity }}'
+ export dbPassword='${{ env.wlsPassword}}'
+ export dbName='${{ env.dbName }}'
+
+ # Try deployment with this image
+ if bash -c "
+ set -e
+
+ # Create Resource Group
+ echo 'Creating resource group: $resourceGroup'
+ az group create --verbose --name '$resourceGroup' --location '${location}'
+
+ ## Prepare Vnet parameters
+ if [ "${{ env.virtualNetworkNewOrExisting }}" == "existing" ]; then
+ export virtualNetworkResourceGroupName=${{ env.resourceGroupForDependency }}
+ export virtualNetworkNewOrExisting=${{ env.virtualNetworkNewOrExisting }}
+ export virtualNetworkName=${{ env.virtualNetworkName }}
+ export subnetName=${{ env.subnetName }}
+ else
+ export virtualNetworkResourceGroupName='$resourceGroup'
+ export virtualNetworkNewOrExisting="new"
+ export virtualNetworkName="wls-vnet"
+ export subnetName="wls-subnet"
+ fi
+
+ # Export variables for envsubst
+ export adminPasswordOrKey='${{ env.wlsPassword }}'
+ export skuUrnVersion='$image'
+ export wlsdomainname='${{ env.wlsDomainName }}'
+ export adminVMName='${{ env.adminVMName }}'
+ export location='${{ env.location }}'
+ export wlsusername='${{ env.wlsUserName }}'
+ export wlspassword='${{ env.wlsPassword }}'
+ export repoPath='${GITHUB_REPOSITORY}'
+ export testbranchName='${{ env.testbranchName }}'
+
+ echo 'Generating parameter file...'
+ envsubst < './${{ env.offerPath }}/test/scripts/parameters-deploy-template.json' > './${{ env.offerPath }}/test/parameters-deploy-${{ github.job }}.json'
+
+ # Accept Image Terms
+ echo 'Accepting terms for image: $image'
+ rawUrn='$image'
+ publisherAndName=\$(echo \${rawUrn} | grep -o ';.*:' | sed 's/;//g')
+ imageVersion=\${rawUrn##*;}
+ az vm image terms accept --urn \${publisherAndName}$sku:\${imageVersion}
+
+ # Deploy WebLogic Server
+ echo 'Deploying WebLogic Server...'
+ az deployment group create \
+ --verbose \
+ --resource-group '$resourceGroup' \
+ --name wls-admin-node \
+ --parameters @'./${{ env.offerPath }}/test/parameters-deploy-${{ github.job }}.json' \
+ --template-file '${{needs.preflight.outputs.artifactName}}/mainTemplate.json'
+
+ # Get admin VM name
+ adminVMName=\$(az deployment group show \
+ --resource-group '$resourceGroup' \
+ --name wls-admin-node \
+ --query 'properties.outputs.adminVMName.value' -o tsv)
+
+ # Verify system services
+ echo 'Verifying WebLogic services...'
+ message=\$(az vm run-command invoke \
+ --resource-group '$resourceGroup' \
+ --name \$adminVMName \
+ --command-id RunShellScript \
+ --scripts @'${{ env.offerPath }}/test/scripts/verify-services.sh' \
+ --query value[*].message)
+
+ if [[ \$message == *'not in active (running) state'* ]]; then
+ echo 'Error: \$message'
+ exit 1
+ fi
+
+ if [ "${{ env.virtualNetworkNewOrExisting }}" == "existing" ]; then
+ # If using existing VNet, there are some gaps to verify the deployment using following steps.
+ echo 'skip following steps, only verify the deployment for existing vnet'
+ exit 0
+ fi
+
+ # Configure network security group
+ echo 'Configuring network security group...'
+ nsg=\$(az network nsg list \
+ --resource-group '$resourceGroup' \
+ --query '[0].name' -o tsv)
+
+ az network nsg rule update \
+ --resource-group '$resourceGroup' \
+ --nsg-name \${nsg} \
+ --name WebLogicAdminChannelPort \
+ --access Allow \
+ --source-address-prefixes 10.0.0.0/24
+
+ # Get public IP
+ publicIP=\$(az vm show \
+ --resource-group '$resourceGroup' \
+ --name \$adminVMName -d \
+ --query publicIps -o tsv)
+
+ # Verify WebLogic Server Access
+ echo 'Verifying WebLogic Server Access...'
+ bash '${{ env.offerPath }}/test/scripts/verify-wls-access.sh' <<< \"\${publicIP} ${adminConsolePort}\"
+
+ echo 'SUCCESS: All verification steps passed!'
+ exit 0
+ "; then
+ echo "✅ SUCCESS: WebLogic deployment succeeded with image: $image"
+ echo "successful_image=$image" >> $GITHUB_ENV
+ echo "successful_resource_group=$resourceGroup" >> $GITHUB_ENV
+ success=true
+
+ # Clean up successful deployment
+ echo "Cleaning up resource group: $resourceGroup"
+ az group delete --yes --no-wait --verbose --name "$resourceGroup" || true
+
+ break
+ else
+ echo "❌ FAILED: WebLogic deployment failed with image: $image, trying next..."
+ # Clean up failed deployment
+ echo "Cleaning up failed resource group: $resourceGroup"
+ az group delete --yes --no-wait --verbose --name "$resourceGroup" || true
+ fi
+ echo "::endgroup::"
+ done
+
+ if [ "$success" = "false" ]; then
+ echo "💥 All images failed!"
+ exit 1
+ else
+ echo "🎉 Workflow succeeded with image: ${{ env.successful_image }}"
+ fi
+
+ cleanup-github-resource:
+ needs: deploy-weblogic-admin
+ if: always()
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout ${{ env.repoOwner }}/${{ env.repoName }}
+ uses: actions/checkout@v2
+ with:
+ repository: ${{ env.repoOwner }}/${{ env.repoName }}
+ path: ${{ env.repoName }}
+ - name: Delete testing branch
+ run: |
+ cd ${{ env.repoName }}
+ git push https://$gitToken@github.com/${GITHUB_REPOSITORY}.git -f --delete $testbranchName
+
+ cleanup-az-resource:
+ needs: deploy-weblogic-admin
+ if: always()
+ runs-on: ubuntu-latest
+ steps:
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Delete DB Resource Group
+ id: delete-db-resource-group
+ run: |
+ echo "delete... " $resourceGroup
+ az group delete --yes --no-wait --verbose --name ${{ env.resourceGroupForDependency }}
+ - name: Delete Resource Group
+ id: delete-resource-group
+ run: |
+ echo "delete resource group with prefix:" ${{ env.resourceGroupPrefix }}
+ az group list --query "[?starts_with(name, '${{ env.resourceGroupPrefix }}')].[name]" -o tsv | xargs -I {} az group delete --name {} --yes --no-wait
+
+ summary:
+ needs: deploy-weblogic-admin
+ if: always()
+ runs-on: ubuntu-latest
+ steps:
+ - name: summarize jobs
+ if: ${{ github.repository_owner == 'wls-eng' }}
+ run: |
+ workflow_jobs=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/runs/${{ github.run_id }}/jobs)
+ critical_job_num=$(echo $workflow_jobs | jq '.jobs | map(select(.name|test("^deploy-weblogic-admin"))) | length')
+ echo "$critical_job_num"
+ succeed_critical_job_num=$(echo $workflow_jobs | jq '.jobs | map(select(.conclusion=="success") | select(.name|test("^deploy-weblogic-admin"))) | length')
+ echo "$succeed_critical_job_num"
+ failed_job_num="$(($critical_job_num-$succeed_critical_job_num))"
+ echo $failed_job_num
+ if (($failed_job_num >= 2));then
+ echo "too many jobs failed, send notification to Teams"
+ curl ${{ secrets.MSTEAMS_WEBHOOK }} \
+ -H 'Content-Type: application/json' \
+ --data-binary @- << EOF
+ {
+ "@context":"http://schema.org/extensions",
+ "@type":"MessageCard",
+ "text":"$failed_job_num jobs failed in Admin Offer's workflow, please take a look at: https://github.com/${GITHUB_REPOSITORY}/actions/runs/${{ github.run_id }}"
+ }
+ EOF
+ fi
diff --git a/.github/workflows/testWlsVmCluster.yml b/.github/workflows/testWlsVmCluster.yml
new file mode 100644
index 000000000..451417b08
--- /dev/null
+++ b/.github/workflows/testWlsVmCluster.yml
@@ -0,0 +1,551 @@
+#Copyright (c) 2021 Oracle and/or its affiliates.
+#Released under the Universal Permissive License v1.0 as shown at
+# https://oss.oracle.com/licenses/upl/
+
+name: Test Configured Cluster on VM
+run-name: Test Configured Cluster on VM with `db`:${{ inputs.databaseType }}
+
+on:
+ workflow_dispatch:
+ inputs:
+ location:
+ description: 'The location for the resources'
+ required: true
+ default: centralus
+ databaseType:
+ description: 'Database connection'
+ required: true
+ default: 'mssqlserver'
+ type: choice
+ options:
+ - mssqlserver
+ - mssqlserver-passwordless
+ - postgresql(flexible)
+ - postgresql-passwordless(flexible)
+ configurations_for_it:
+ description: "JSON string of environment variables used for IT"
+ required: false
+ default: '{}'
+ # Allows you to run this workflow using GitHub APIs
+ # PERSONAL_ACCESS_TOKEN=
+ # REPO_NAME=oracle/weblogic-azure/weblogic-azure-vm/arm-oraclelinux-wls-cluster
+ # curl --verbose -XPOST -u "mriccell:${PERSONAL_ACCESS_TOKEN}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/${REPO_NAME}/dispatches --data '{"event_type": "test-vm-cluster", "client_payload": {}}'
+ repository_dispatch:
+ types: [test-vm-cluster,integration-test-all]
+
+env:
+ adminConsolePort: 7001
+ adminPassword: ${{ secrets.WLS_PSW }}
+ dbAdminUser: weblogic
+ dbName: wlsdb${{ github.run_id }}${{ github.run_number }}
+ dbServerName: weblogicdb-${{ github.run_id }}-${{ github.run_number }}
+ uamiName: uami${{ github.run_id }}${{ github.run_number }}
+ gitEmail: ${{ secrets.USER_EMAIL }}
+ gitToken: ${{ secrets.GIT_TOKEN }}
+ gitUserName: ${{ secrets.USER_NAME }}
+ managedServers: "msp1"
+ numberOfInstances: 2
+ offerName: arm-oraclelinux-wls-cluster
+ offerPath: weblogic-azure/weblogic-azure-vm/arm-oraclelinux-wls-cluster
+ repoName: weblogic-azure
+ repoOwner: ${{ github.repository_owner }}
+ resourceGroupForDependency: wlsd-${{ github.repository_owner }}-${{ github.run_id }}-${{ github.run_number }}
+ resourceGroupPrefix: wls-${{ github.repository_owner }}-${{ github.run_id }}-${{ github.run_number }}
+ testbranchName: cicd-${{ github.run_id }}-${{ github.run_number }}
+ wlsDomainName: wlsd
+ wlsPassword: ${{ secrets.WLS_PSW }}
+ wlsUserName: weblogic
+ ref: ${{ github.ref_name }}
+ images: |
+ owls-141100-jdk11-ol91;Oracle:weblogic-141100-jdk11-ol91:owls-141100-jdk11-ol91;latest
+ owls-141100-jdk11-ol87;Oracle:weblogic-141100-jdk11-ol87:owls-141100-jdk11-ol87;latest
+ owls-141100-jdk8-ol91;Oracle:weblogic-141100-jdk8-ol91:owls-141100-jdk8-ol91;latest
+ owls-141100-jdk8-ol87;Oracle:weblogic-141100-jdk8-ol87:owls-141100-jdk8-ol87;latest
+ owls-122140-jdk8-ol91;Oracle:weblogic-122140-jdk8-ol91:owls-122140-jdk8-ol91;latest
+ owls-122140-jdk8-ol87;Oracle:weblogic-122140-jdk8-ol87:owls-122140-jdk8-ol87;latest
+ owls-122140-jdk8-ol76;Oracle:weblogic-122140-jdk8-ol76:owls-122140-jdk8-ol7;latest
+ owls-141100-jdk8-ol76;Oracle:weblogic-141100-jdk8-ol76:owls-141100-jdk8-ol7;latest
+ owls-141100-jdk11-ol76;Oracle:weblogic-141100-jdk11-ol76:owls-141100-jdk11-ol7;latest
+ owls-122140-jdk8-rhel76;Oracle:weblogic-122140-jdk8-rhel76:owls-122140-jdk8-rhel76;latest
+ owls-141100-jdk8-rhel76;Oracle:weblogic-141100-jdk8-rhel76:owls-141100-jdk8-rhel76;latest
+ owls-141100-jdk11-rhel76;Oracle:weblogic-141100-jdk11-rhel76:owls-141100-jdk11-rhel76;latest
+
+jobs:
+ preflight:
+ outputs:
+ artifactName: ${{steps.artifact_file.outputs.artifactName}}
+ location: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.location }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ location=centralus # default value
+
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ location=${{ github.event.inputs.location }}
+ else
+ location=${{ github.event.client_payload.location || 'centralus' }}
+ fi
+
+ echo "##[set-output name=location;]${location}"
+ echo "location=${location}" >> $GITHUB_ENV
+
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: actions/checkout@v2.3.4
+ - name: Set dependency reference
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - name: Checkout arm-ttk
+ uses: actions/checkout@v2
+ with:
+ repository: Azure/arm-ttk
+ path: arm-ttk
+ ref: ${{ env.refArmTtk }}
+ - name: Checkout ${{env.repoOwner}}/${{env.repoName}}
+ uses: actions/checkout@v2
+ with:
+ repository: ${{env.repoOwner}}/${{env.repoName}}
+ path: ${{env.repoName}}
+
+ - name: Build and test ${{ env.offerName }}
+ run: |
+ mvn -Ptemplate-validation-tests clean install --file ${offerPath}/pom.xml -Dgit.repo.owner=${{ env.repoOwner }} -Dgit.tag=${{ env.ref }}
+ - name: Checkout ${{env.repoOwner}}/${{env.repoName}}
+ uses: actions/checkout@v2
+ with:
+ repository: ${{env.repoOwner}}/${{env.repoName}}
+ path: ${{env.repoName}}-dev
+ - name: Create a new branch with development pids in nestedtemplates
+ run: |
+ current=`pwd`
+ echo "current=${current}" >> $GITHUB_ENV
+ offerDevPath=${{ env.repoName }}-dev/weblogic-azure-vm/${{env.offerName}}/${{env.offerName}}
+ cd ${offerDevPath}/src/main/arm/nestedtemplates
+ git config --global core.longpaths true
+ git config --global user.email $gitEmail
+ git config --global user.name $gitUserName
+ echo "create branch $testbranchName"
+ git checkout -b $testbranchName
+ rm -r -f $current/${offerDevPath}/src/main/arm/nestedtemplates/*
+ cp -r -f $current/${offerPath}/${{ env.offerName }}/target/arm/nestedtemplates/* $current/${offerDevPath}/src/main/arm/nestedtemplates/
+ git status
+ git commit -a -m "hard code pids"
+ git push https://$gitToken@github.com/${GITHUB_REPOSITORY}.git -f
+
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Validate deployment templates for different combinations of service integration
+ id: validate-deployment-templates
+ run: |
+ bash ${{ env.offerPath }}/test/scripts/verify-deployments.sh <<< "${{ github.run_id }}${{ github.run_number }} ${location} \
+ ${{ env.offerPath }}/${{ env.offerName }}/target/arm/mainTemplate.json \
+ ${GITHUB_REPOSITORY} ${testbranchName} ${{ env.offerPath }}/test/scripts"
+
+ - name: Generate artifact file name and path
+ id: artifact_file
+ run: |
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${version.${{ env.offerName }}}' --file weblogic-azure/pom.xml --non-recursive exec:exec)
+ artifactName=${{ env.offerName }}-$version-arm-assembly
+ unzip ${{ env.offerPath }}/${{ env.offerName }}/target/$artifactName.zip -d ${{ env.offerPath }}/${{ env.offerName }}/target/$artifactName
+ echo "##[set-output name=artifactName;]${artifactName}"
+ echo "##[set-output name=artifactPath;]${{ env.offerPath }}/${{ env.offerName }}/target/$artifactName"
+ - name: Archive ${{ env.offerName }} template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_file.outputs.artifactPath}}
+
+ deploy-dependencies:
+ needs: preflight
+ runs-on: ubuntu-latest
+ steps:
+ - name: Initialize environment variables
+ run: |
+ echo "location=${{needs.preflight.outputs.location}}" >> $GITHUB_ENV
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+ - name: Create Resource Group
+ run: |
+ echo "create resource group" ${{ env.resourceGroupForDependency }}
+ az group create --verbose --name ${{ env.resourceGroupForDependency }} --location ${location}
+
+ deploy-weblogic-cluster:
+ needs: [deploy-dependencies, preflight]
+ runs-on: ubuntu-latest
+ env: ${{ fromJson(inputs.configurations_for_it) }}
+ steps:
+ - name: Initialize environment variables
+ run: |
+ echo "location=${{needs.preflight.outputs.location}}" >> $GITHUB_ENV
+
+ - name: Checkout weblogic-azure repository
+ uses: actions/checkout@v4
+ with:
+ path: ${{env.repoName}}
+
+ - name: Download artifact for deployment
+ uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
+ with:
+ name: ${{ needs.preflight.outputs.artifactName }}
+ path: ${{needs.preflight.outputs.artifactName}}
+
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+ - name: Checkout WebLogic-Cafe
+ id: checkout-webapp
+ uses: actions/checkout@v2
+ with:
+ repository: azure-javaee/weblogic-on-azure
+ path: weblogic-on-azure
+
+ - name: Maven build the web app
+ id: maven-build-webapp
+ run: |
+ echo "build the WebLogic Cafe web app"
+ mvn -DskipTests clean install --file weblogic-on-azure/javaee/weblogic-cafe/pom.xml
+
+ - name: Provision database
+ id: database-provision
+ uses: ./weblogic-azure/.github/actions/database-provision
+ with:
+ databaseType: ${{ inputs.databaseType }}
+ resourceGroup: ${{ env.resourceGroupForDependency }}
+ uamiName: ${{ env.uamiName }}
+ location: ${{ env.location }}
+ dbInstanceName: ${{ env.dbServerName }}
+ dbPassword: ${{ env.wlsPassword }}
+ databaseName: ${{ env.dbName }}
+ dbAdminUser: ${{ env.dbAdminUser }}
+
+ - name: Get database parameters
+ id: database-parameters
+ uses: ./weblogic-azure/.github/actions/database-parameters
+ with:
+ databaseType: ${{ inputs.databaseType }}
+ uamiId: ${{ steps.database-provision.outputs.uamiId }}
+ serverHost: ${{ steps.database-provision.outputs.serverHost }}
+ dbInstanceName: ${{ env.dbServerName }}
+ databaseName: ${{ env.dbName }}
+ dbAdminUser: ${{ env.dbAdminUser }}
+
+ - name: Provision Azure Vnet
+ id: vnet-provision
+ if: ${{ env.virtualNetworkNewOrExisting == 'existing' }}
+ run: |
+ echo "Provisioning Azure Vnet with subnet"
+ az network vnet create \
+ --resource-group ${{ env.resourceGroupForDependency }} \
+ --name ${{ env.virtualNetworkName }} \
+ --address-prefix 10.0.0.0/23 \
+ --subnet-name ${{ env.subnetForCluster }} \
+ --subnet-prefix 10.0.0.0/26
+
+ echo "Provisioning Azure Subnet for App Gateway"
+ az network vnet subnet create \
+ --resource-group ${{ env.resourceGroupForDependency }} \
+ --name ${{ env.subnetForAppGateway }} \
+ --vnet-name ${{ env.virtualNetworkName }} \
+ --address-prefix 10.0.1.0/24
+
+ - name: Try each image until one succeeds
+ run: |
+ # Convert multiline string to array
+ IFS=$'\n' read -d '' -r -a image_array <<< "${{ env.images }}" || true
+
+ success=false
+
+ for image in "${image_array[@]}"; do
+ if [ -z "$image" ]; then
+ continue
+ fi
+
+ echo "::group::Trying image: $image"
+
+ # Set deployment variables for this image
+ imageUrn="$image"
+ sku=${imageUrn%%;*}
+ resourceGroup=$(echo "${resourceGroupPrefix}-${sku}" | sed "s/_//g")
+
+ echo "Deploying with SKU: $sku"
+ echo "Resource Group: $resourceGroup"
+
+ # Export db variables for envsubst
+ export databaseType='${{ steps.database-parameters.outputs.databaseType }}'
+ export enableDB=${{ steps.database-parameters.outputs.enableDB }}
+ export enablePswlessConnection=${{ steps.database-parameters.outputs.enablePswlessConnection }}
+ export dsConnectionURL='${{ steps.database-parameters.outputs.dsConnectionURL }}'
+ export dbUser='${{ steps.database-parameters.outputs.dbUser }}'
+ export dbIdentity='${{ steps.database-parameters.outputs.dbIdentity }}'
+ export dbPassword='${{ env.wlsPassword}}'
+ export dbName='${{ env.dbName }}'
+
+ # Try deployment with this image
+ if bash -c "
+ set -e
+
+ # Create Resource Group
+ echo 'Creating resource group: $resourceGroup'
+ az group create --verbose --name '$resourceGroup' --location '${location}'
+
+ ## Prepare parameters for Vnet
+ if [ "${{ env.virtualNetworkNewOrExisting }}" == "existing" ]; then
+ export virtualNetworkNewOrExisting=${{ env.virtualNetworkNewOrExisting }}
+ export virtualNetworkResourceGroupName=${{ env.resourceGroupForDependency }}
+ export virtualNetworkName=${{ env.virtualNetworkName }}
+ export subnetForCluster=${{ env.subnetForCluster }}
+ export subnetForAppGateway=${{ env.subnetForAppGateway }}
+ else
+ export virtualNetworkNewOrExisting='new'
+ export virtualNetworkResourceGroupName='$resourceGroup'
+ export virtualNetworkName='wlscluster-vnet-cluster'
+ export subnetForCluster='jboss-subnet-for-cluster'
+ export subnetForAppGateway='appgateway-subnet'
+ fi
+
+ export repoPath='${GITHUB_REPOSITORY}'
+ export testbranchName='${{ env.testbranchName }}'
+ export location='${{ env.location }}'
+ export adminPasswordOrKey='${{ env.wlsPassword }}'
+ export wlsusername='${{ env.wlsUserName }}'
+ export wlsdomainname='${{ env.wlsDomainName }}'
+ export skuUrnVersion='$image'
+ export wlspassword='${{ env.wlsPassword }}'
+
+ echo 'Generating parameter file...'
+ envsubst < './${{ env.offerPath }}/test/scripts/parameters-deploy-template.json' > './${{ env.offerPath }}/test/parameters-deploy-${{ github.job }}.json'
+
+ # Accept Image Terms
+ echo 'Accepting terms for image: $image'
+ rawUrn='$image'
+ publisherAndName=\$(echo \${rawUrn} | grep -o ';.*:' | sed 's/;//g')
+ imageVersion=\${rawUrn##*;}
+ az vm image terms accept --urn \${publisherAndName}$sku:\${imageVersion}
+
+ # Deploy WebLogic Server Cluster Domain offer
+ echo 'Deploying WebLogic Server Cluster...'
+ az deployment group create \
+ --verbose \
+ --resource-group '$resourceGroup' \
+ --name wls-cluster-node \
+ --parameters @'./${{ env.offerPath }}/test/parameters-deploy-${{ github.job }}.json' \
+ --template-file '${{needs.preflight.outputs.artifactName}}/mainTemplate.json'
+
+ if [ "${{ env.virtualNetworkNewOrExisting }}" == "existing" ]; then
+ # If using existing VNet, there are some gaps to verify the deployment using following steps.
+ echo 'skip following steps, only verify the deployment for existing vnet'
+ exit 0
+ fi
+
+ # Get admin VM name
+ echo 'Retrieving admin VM name...'
+ adminVMName=\$(az deployment group show \
+ --resource-group '$resourceGroup' \
+ --name wls-cluster-node \
+ --query 'properties.outputs.adminVMName.value' -o tsv)
+
+ # Configure network security group
+ echo 'Configuring network security group...'
+ nsg=\$(az network nsg list \
+ --resource-group '$resourceGroup' \
+ --query '[0].name' -o tsv)
+
+ az network nsg rule update \
+ --resource-group '$resourceGroup' \
+ --nsg-name \${nsg} \
+ --name WebLogicAdminChannelPort \
+ --access Allow \
+ --source-address-prefixes 10.0.0.0/24
+
+ # Verify system services at admin server
+ echo 'Verifying WebLogic services at admin server...'
+ message=\$(az vm run-command invoke \
+ --resource-group '$resourceGroup' \
+ --name \$adminVMName \
+ --command-id RunShellScript \
+ --scripts @'${{ env.offerPath }}/test/scripts/verify-admin-services.sh' \
+ --query value[*].message)
+
+ if [[ \$message == *'not in active (running) state'* ]]; then
+ echo 'Error: \$message'
+ exit 1
+ fi
+
+ # Verify wls managed server services
+ echo 'Verifying WebLogic managed server services...'
+ managedServerVMNamePrefix=\$(az deployment group show \
+ --resource-group '$resourceGroup' \
+ --name wls-cluster-node \
+ --query 'properties.outputs.managedServerVMNamePrefix.value' -o tsv)
+
+ managedServer1=\"\${managedServerVMNamePrefix}1\"
+
+ message=\$(az vm run-command invoke \
+ --resource-group '$resourceGroup' \
+ --name \$managedServer1 \
+ --command-id RunShellScript \
+ --scripts @'${{ env.offerPath }}/test/scripts/verify-node-services.sh' \
+ --query value[*].message)
+
+ if [[ \$message == *'not in active (running) state'* ]]; then
+ echo 'Error: \$message'
+ exit 1
+ fi
+
+ # Get public IP
+ publicIP=\$(az vm show \
+ --resource-group '$resourceGroup' \
+ --name \$adminVMName -d \
+ --query publicIps -o tsv)
+
+ # Verify WebLogic Server Access
+ echo 'Verifying WebLogic Server Access...'
+ bash '${{ env.offerPath }}/test/scripts/verify-wls-access.sh' <<< \"\${publicIP} ${adminConsolePort} $wlsUserName $wlsPassword $managedServers\"
+
+ # Verify WebLogic Managed Server LifeCycle check
+ echo 'Verifying WebLogic managed server lifecycle...'
+ bash '${{ env.offerPath }}/test/scripts/verify-servers-lifecycle.sh' <<< \"$wlsUserName ${wlsPassword} \${publicIP} $adminConsolePort $managedServers\"
+
+ # Query appGatewayURL
+ echo 'Querying app gateway URL...'
+ appGatewayURL=\$(az deployment group show \
+ --resource-group '$resourceGroup' \
+ --name wls-cluster-node \
+ --query 'properties.outputs.appGatewayURL.value' -o tsv)
+
+ # Checkout WebLogic-Cafe (done outside the loop)
+
+ # Query admin VM DNS
+ echo 'Querying admin VM DNS...'
+ adminNicId=\$(az vm show \
+ --resource-group '$resourceGroup' \
+ --name \$adminVMName \
+ --query networkProfile.networkInterfaces[0].id -o tsv)
+ adminPublicIPId=\$(az network nic show --id \${adminNicId} --query ipConfigurations[0].publicIPAddress.id -o tsv)
+ adminVMDNS=\$(az network public-ip show \
+ --id \"\${adminPublicIPId}\" \
+ --query dnsSettings.fqdn -o tsv)
+
+ # Deploy WebLogicCafe app (need to checkout and build first)
+ echo 'Deploying WebLogic Cafe app...'
+ timeout 6m sh -c 'until nc -zv \$0 \$1; do echo \"nc rc: \$?\"; sleep 5; done' \${adminVMDNS} ${adminConsolePort}
+ bash '${{ env.offerPath }}/test/scripts/deploy-webapp.sh' <<< \"$wlsUserName $wlsPassword \${adminVMDNS} ${adminConsolePort}\"
+
+ # Verify WebLogicCafe app deployment
+ echo 'Verifying WebLogic Cafe app deployment...'
+ bash '${{ env.offerPath }}/test/scripts/verify-webapp-deployment.sh' <<< \"\${appGatewayURL}\"
+
+ echo 'SUCCESS: All verification steps passed!'
+ exit 0
+ "; then
+ echo "✅ SUCCESS: WebLogic cluster deployment succeeded with image: $image"
+ echo "successful_image=$image" >> $GITHUB_ENV
+ echo "successful_resource_group=$resourceGroup" >> $GITHUB_ENV
+ success=true
+
+ # Clean up successful deployment
+ echo "Cleaning up resource group: $resourceGroup"
+ az group delete --yes --no-wait --verbose --name "$resourceGroup" || true
+
+ break
+ else
+ echo "❌ FAILED: WebLogic cluster deployment failed with image: $image, trying next..."
+ # Clean up failed deployment
+ echo "Cleaning up failed resource group: $resourceGroup"
+ az group delete --yes --no-wait --verbose --name "$resourceGroup" || true
+ fi
+ echo "::endgroup::"
+ done
+
+ if [ "$success" = "false" ]; then
+ echo "💥 All images failed!"
+ exit 1
+ else
+ echo "🎉 Workflow succeeded with image: ${{ env.successful_image }}"
+ fi
+
+ cleanup-github-resource:
+ needs: deploy-weblogic-cluster
+ if: always() # run even when the deployment job fails, so the test branch never lingers
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout ${{ env.repoName }}
+ uses: actions/checkout@v2
+ with:
+ repository: ${{env.repoOwner}}/${{env.repoName}}
+ path: ${{ env.repoName }}
+ - name: Delete testing branch
+ run: |
+ cd ${{ env.repoName }}
+ git push https://$gitToken@github.com/$GITHUB_REPOSITORY.git -f --delete $testbranchName # remove the temporary cicd-<run_id>-<run_number> branch pushed earlier in the workflow
+
+ cleanup-az-resource:
+ if: always()
+ needs: deploy-weblogic-cluster
+ runs-on: ubuntu-latest
+ env: ${{ fromJson(inputs.configurations_for_it) }}
+ steps:
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Delete resource group for dependency
+ id: delete-resource-group-for-dependency
+ run: |
+ echo "delete resource group: ${{ env.resourceGroupForDependency }}"
+ az group delete --yes --no-wait --verbose --name ${{ env.resourceGroupForDependency }}
+ - name: Delete existing Vnet
+ id: delete-vnet
+ if: ${{ env.virtualNetworkNewOrExisting == 'existing' }}
+ run: |
+ echo "wait only other resources have been deleted"
+ sleep 10m
+ echo "delete vnet" ${{ env.virtualNetworkName }} "in resource group" ${{ env.resourceGroupForDependency }}
+ az network vnet delete --name ${{ env.virtualNetworkName }} --resource-group ${{ env.resourceGroupForDependency }}
+ - name: Delete Resource Group
+ id: delete-resource-group
+ run: |
+ echo "delete resource group with prefix:" ${{ env.resourceGroupPrefix }}
+ az group list --query "[?starts_with(name, '${{ env.resourceGroupPrefix }}')].[name]" -o tsv | xargs -I {} az group delete --name {} --yes --no-wait
+
+ summary:
+ needs: deploy-weblogic-cluster
+ if: always()
+ runs-on: ubuntu-latest
+ steps:
+ - name: summarize jobs
+ if: ${{ github.repository_owner == 'wls-eng' }}
+ run: |
+ workflow_jobs=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/runs/${{ github.run_id }}/jobs)
+ critical_job_num=$(echo $workflow_jobs | jq '.jobs | map(select(.name|test("^deploy-weblogic-cluster$"))) | length')
+ echo "$critical_job_num"
+ succeed_critical_job_num=$(echo $workflow_jobs | jq '.jobs | map(select(.conclusion=="success") | select(.name|test("^deploy-weblogic-cluster$"))) | length')
+ echo "$succeed_critical_job_num"
+ failed_job_num="$(($critical_job_num-$succeed_critical_job_num))"
+ echo $failed_job_num
+ if (($failed_job_num >= 1));then
+ echo "job failed, send notification to Teams"
+ curl ${{ secrets.MSTEAMS_WEBHOOK }} \
+ -H 'Content-Type: application/json' \
+ --data-binary @- << EOF
+ {
+ "@context":"http://schema.org/extensions",
+ "@type":"MessageCard",
+ "text":"$failed_job_num job failed in Configured Cluster Offer's workflow, please take a look at: https://github.com/${GITHUB_REPOSITORY}/actions/runs/${{ github.run_id }}"
+ }
+ EOF
+ fi
diff --git a/.github/workflows/testWlsVmDynamicCluster.yml b/.github/workflows/testWlsVmDynamicCluster.yml
new file mode 100644
index 000000000..6bb532d93
--- /dev/null
+++ b/.github/workflows/testWlsVmDynamicCluster.yml
@@ -0,0 +1,510 @@
+# Copyright (c) 2021 Oracle and/or its affiliates.
+# Released under the Universal Permissive License v1.0 as shown at
+# https://oss.oracle.com/licenses/upl/
+
+name: Test Dynamic Cluster on VM
+run-name: Test Dynamic Cluster on VM with `db`:${{ inputs.databaseType }}
+
+on:
+ workflow_dispatch:
+ inputs:
+ location:
+ description: 'Location of the resources'
+ required: true
+ default: 'centralus'
+ databaseType:
+ description: 'Database connection'
+ required: true
+ default: 'mssqlserver'
+ type: choice
+ options:
+ - mssqlserver
+ - mssqlserver-passwordless
+ - postgresql(flexible)
+ - postgresql-passwordless(flexible)
+ # Allows you to run this workflow using GitHub APIs
+ # PERSONAL_ACCESS_TOKEN=
+ # REPO_NAME=oracle/weblogic-azure/weblogic-azure-vm/arm-oraclelinux-wls-dynamic-cluster
+ # curl --verbose -XPOST -u "mriccell:${PERSONAL_ACCESS_TOKEN}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/${REPO_NAME}/dispatches --data '{"event_type": "test-vm-dynamic-cluster"}'
+ repository_dispatch:
+ types: [test-vm-dynamic-cluster,integration-test-all]
+
+env:
+ adminConsolePort: 7001
+ adminPassword: ${{ secrets.WLS_PSW }}
+ dbAdminUser: weblogic
+ dbName: wlsdb${{ github.run_id }}${{ github.run_number }}
+ dbServerName: db${{ github.run_id }}${{ github.run_number }}
+ uamiName: uami${{ github.run_id }}${{ github.run_number }}
+ dynamicClusterSize: 1
+ gitEmail: ${{ secrets.USER_EMAIL }}
+ gitToken: ${{ secrets.GIT_TOKEN }}
+ gitUserName: ${{ secrets.USER_NAME }}
+ location: centralus
+ nsg: wls-nsg
+ managedServerPrefix: managedServer
+ managedServers: "managedServer1"
+ managedServerVM: "managedServerVM1"
+ maxDynamicClusterSize: 2
+ offerName: arm-oraclelinux-wls-dynamic-cluster
+ offerPath: weblogic-azure/weblogic-azure-vm/arm-oraclelinux-wls-dynamic-cluster
+ repoName: weblogic-azure
+ repoOwner: ${{ github.repository_owner }}
+ resourceGroupForDependency: wlsd-${{ github.repository_owner }}-${{ github.run_id }}-${{ github.run_number }}
+ resourceGroupPrefix: wls-${{ github.repository_owner }}-${{ github.run_id }}-${{ github.run_number }}
+ testbranchName: cicd-${{ github.run_id }}-${{ github.run_number }}
+ wlsDomainName: dyClusterDomain
+ wlsPassword: ${{ secrets.WLS_PSW }}
+ wlsUserName: weblogic
+ images: |
+ owls-141100-jdk11-ol91;Oracle:weblogic-141100-jdk11-ol91:owls-141100-jdk11-ol91;latest
+ owls-141100-jdk11-ol87;Oracle:weblogic-141100-jdk11-ol87:owls-141100-jdk11-ol87;latest
+ owls-141100-jdk8-ol91;Oracle:weblogic-141100-jdk8-ol91:owls-141100-jdk8-ol91;latest
+ owls-141100-jdk8-ol87;Oracle:weblogic-141100-jdk8-ol87:owls-141100-jdk8-ol87;latest
+ owls-122140-jdk8-ol91;Oracle:weblogic-122140-jdk8-ol91:owls-122140-jdk8-ol91;latest
+ owls-122140-jdk8-ol87;Oracle:weblogic-122140-jdk8-ol87:owls-122140-jdk8-ol87;latest
+ owls-122140-jdk8-ol76;Oracle:weblogic-122140-jdk8-ol76:owls-122140-jdk8-ol7;latest
+ owls-141100-jdk8-ol76;Oracle:weblogic-141100-jdk8-ol76:owls-141100-jdk8-ol7;latest
+ owls-141100-jdk11-ol76;Oracle:weblogic-141100-jdk11-ol76:owls-141100-jdk11-ol7;latest
+ owls-122140-jdk8-rhel76;Oracle:weblogic-122140-jdk8-rhel76:owls-122140-jdk8-rhel76;latest
+ owls-141100-jdk8-rhel76;Oracle:weblogic-141100-jdk8-rhel76:owls-141100-jdk8-rhel76;latest
+ owls-141100-jdk11-rhel76;Oracle:weblogic-141100-jdk11-rhel76:owls-141100-jdk11-rhel76;latest
+
+jobs:
+ preflight:
+ outputs:
+ artifactName: ${{steps.artifact_file.outputs.artifactName}}
+ location: ${{ steps.setup-env-variables-based-on-dispatch-event.outputs.location }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup environment variables
+ id: setup-env-variables-based-on-dispatch-event
+ run: |
+ location=centralus # default value
+
+ if [ ${{ github.event_name }} == 'workflow_dispatch' ]; then
+ location=${{ github.event.inputs.location }}
+ else
+ location=${{ github.event.client_payload.location }}
+ fi
+
+ echo "##[set-output name=location;]${location}"
+ echo "location=${location}" >> $GITHUB_ENV
+ - uses: actions/checkout@v2.3.4
+ - name: Set up Maven with GitHub token
+ uses: ./.github/actions/setupmaven
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - uses: actions/checkout@v2.3.4
+ - name: Set dependency reference
+ uses: ./.github/actions/setvars
+ with:
+ varFilePath: ./.github/variables/vm-dependencies.env
+ - name: Checkout arm-ttk
+ uses: actions/checkout@v2
+ with:
+ repository: Azure/arm-ttk
+ path: arm-ttk
+ ref: ${{ env.refArmTtk }}
+ - name: Checkout ${{env.repoOwner}}/${{env.repoName}}
+ uses: actions/checkout@v2
+ with:
+ repository: ${{env.repoOwner}}/${{env.repoName}}
+ path: ${{env.repoName}}
+ - name: Built and test ${{env.offerName}}
+ run: mvn -Ptemplate-validation-tests clean install --file ${{ env.offerPath }}/pom.xml
+
+ - name: Checkout ${{env.repoOwner}}/${{env.repoName}}
+ uses: actions/checkout@v2
+ with:
+ repository: ${{env.repoOwner}}/${{env.repoName}}
+ path: ${{env.repoName}}-dev
+ - name: Create a new branch with development pids in nestedtemplates
+ run: |
+ current=`pwd`
+ echo "current=${current}" >> $GITHUB_ENV
+ offerDevPath=${{ env.repoName }}-dev/weblogic-azure-vm/${{env.offerName}}/${{env.offerName}}
+ cd ${offerDevPath}/src/main/arm/nestedtemplates
+ git config --global core.longpaths true
+ git config --global user.email $gitEmail
+ git config --global user.name $gitUserName
+ echo "create branch $testbranchName"
+ git checkout -b $testbranchName
+ rm -r -f $current/${offerDevPath}/src/main/arm/nestedtemplates/*
+ cp -r -f $current/${{ env.offerPath }}/${{ env.offerName }}/target/arm/nestedtemplates/* $current/${offerDevPath}/src/main/arm/nestedtemplates/
+ git status
+ git commit -a -m "hard code pids"
+ git push https://$gitToken@github.com/${GITHUB_REPOSITORY}.git -f
+
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Validate deployment templates for different combinations of service integration
+ id: validate-deployment-templates
+ run: |
+ bash ${{ env.offerPath }}/test/scripts/verify-deployments.sh <<< \
+ "${{ github.run_id }}${{ github.run_number }} ${location} \
+ ${{ env.offerPath }}/${offerName}/target/arm/mainTemplate.json \
+ ${GITHUB_REPOSITORY} ${testbranchName} ${{ env.offerPath }}/test/scripts"
+
+ - name: Generate artifact file name and path
+ id: artifact_file
+ run: |
+ version=$(mvn -q -Dexec.executable=echo -Dexec.args='${version.${{ env.offerName }}}' --file weblogic-azure/pom.xml --non-recursive exec:exec)
+ artifactName=${{ env.offerName }}-$version-arm-assembly
+ unzip ${{ env.offerPath }}/${{ env.offerName }}/target/$artifactName.zip -d ${{ env.offerPath }}/${{ env.offerName }}/target/$artifactName
+ echo "##[set-output name=artifactName;]${artifactName}"
+ echo "##[set-output name=artifactPath;]${{ env.offerPath }}/${{ env.offerName }}/target/$artifactName"
+ - name: Archive ${{env.offerName}} template
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+ if: success()
+ with:
+ name: ${{steps.artifact_file.outputs.artifactName}}
+ path: ${{steps.artifact_file.outputs.artifactPath}}
+
+ deploy-dependencies:
+ needs: preflight
+ runs-on: ubuntu-latest
+ steps:
+ - name: Initialize environment variables
+ run: |
+ echo "location=${{needs.preflight.outputs.location}}" >> $GITHUB_ENV # propagate the location resolved by preflight into this job's env
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Create Resource Group
+ id: create-resource-group
+ run: |
+ echo "create resource group" ${{ env.resourceGroupForDependency }}
+ az group create --verbose --name ${{ env.resourceGroupForDependency }} --location ${location} # holds run-scoped dependencies; deleted by cleanup-az-resource
+
+ deploy-weblogic-cluster:
+ needs: [preflight, deploy-dependencies]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Initialize environment variables
+ run: |
+ echo "location=${{needs.preflight.outputs.location}}" >> $GITHUB_ENV
+
+ - name: Checkout weblogic-azure repository
+ uses: actions/checkout@v4
+ with:
+ path: ${{env.repoName}}
+
+ - name: Download artifact for deployment
+ uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
+ with:
+ name: ${{ needs.preflight.outputs.artifactName }}
+ path: ${{needs.preflight.outputs.artifactName}}
+
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+ - uses: actions/setup-java@v4
+ with:
+ distribution: 'microsoft'
+ java-version: '11'
+
+ - name: Checkout WebLogic-Cafe
+ uses: actions/checkout@v2
+ with:
+ repository: azure-javaee/weblogic-on-azure
+ path: weblogic-on-azure
+
+ - name: Maven build the web app
+ run: |
+ echo "build the WebLogic Cafe web app"
+ mvn -DskipTests clean install --file weblogic-on-azure/javaee/weblogic-cafe/pom.xml
+
+ - name: Provision database
+ id: database-provision
+ uses: ./weblogic-azure/.github/actions/database-provision
+ with:
+ databaseType: ${{ inputs.databaseType }}
+ resourceGroup: ${{ env.resourceGroupForDependency }}
+ uamiName: ${{ env.uamiName }}
+ location: ${{ env.location }}
+ dbInstanceName: ${{ env.dbServerName }}
+ dbPassword: ${{ env.wlsPassword }}
+ databaseName: ${{ env.dbName }}
+ dbAdminUser: ${{ env.dbAdminUser }}
+
+ - name: Get database parameters
+ id: database-parameters
+ uses: ./weblogic-azure/.github/actions/database-parameters
+ with:
+ databaseType: ${{ inputs.databaseType }}
+ uamiId: ${{ steps.database-provision.outputs.uamiId }}
+ serverHost: ${{ steps.database-provision.outputs.serverHost }}
+ dbInstanceName: ${{ env.dbServerName }}
+ databaseName: ${{ env.dbName }}
+ dbAdminUser: ${{ env.dbAdminUser }}
+
+ - name: Try each image until one succeeds
+ run: |
+ # Convert multiline string to array
+ IFS=$'\n' read -d '' -r -a image_array <<< "${{ env.images }}" || true
+
+ success=false
+
+ for image in "${image_array[@]}"; do
+ if [ -z "$image" ]; then
+ continue
+ fi
+
+ echo "::group::Trying image: $image"
+
+ # Set deployment variables for this image
+ imageUrn="$image"
+ sku=${imageUrn%%;*}
+ resourceGroup=$(echo "${resourceGroupPrefix}-${sku}" | sed "s/_//g")
+
+ echo "Deploying with SKU: $sku"
+ echo "Resource Group: $resourceGroup"
+
+ # Export db variables for envsubst
+ export databaseType='${{ steps.database-parameters.outputs.databaseType }}'
+ export enableDB=${{ steps.database-parameters.outputs.enableDB }}
+ export enablePswlessConnection=${{ steps.database-parameters.outputs.enablePswlessConnection }}
+ export dsConnectionURL='${{ steps.database-parameters.outputs.dsConnectionURL }}'
+ export dbUser='${{ steps.database-parameters.outputs.dbUser }}'
+ export dbIdentity='${{ steps.database-parameters.outputs.dbIdentity }}'
+ export dbPassword='${{ env.wlsPassword}}'
+ export dbName='${{ env.dbName }}'
+ export dbServerName='${{ env.dbServerName }}'
+
+ # Try deployment with this image
+ if bash -c "
+ set -e
+
+ # Create Resource Group
+ echo 'Creating resource group: $resourceGroup'
+ az group create --verbose --name '$resourceGroup' --location '${location}'
+
+ # Generate selfsigned certificate
+ echo 'Generate selfsigned certificate'
+ bash '${{ env.offerPath }}/test/scripts/generate-selfsigned-keystore.sh' <<< \
+ '${wlsPassword} ${wlsPassword} ${wlsPassword}'
+
+ echo 'Current working directory: $(pwd)'
+ # check whether identity.jks exists
+ if [ ! -f identity.jks ]; then
+ echo 'Error: identity.jks not found!'
+ else
+ echo 'identity.jks found!'
+ fi
+
+ # Generate deployment parameters
+ echo 'Generate deployment parameters...'
+ export location='${{ env.location }}'
+ export adminPasswordOrKey='${{ env.wlsPassword }}'
+ export wlsdomainname='${{ env.wlsDomainName }}'
+ export wlsusername='${{ env.wlsUserName }}'
+ export wlspassword='${{ env.wlsPassword }}'
+ export maxDynamicClusterSize='${{ env.maxDynamicClusterSize }}'
+ export dynamicClusterSize='${{ env.dynamicClusterSize }}'
+ export skuUrnVersion='$image'
+ export testbranchName='${{ env.testbranchName }}'
+ export managedServerPrefix=${{ env.managedServerPrefix }}
+ export repoPath='${GITHUB_REPOSITORY}'
+ export uploadedKeyStoreData=\$(cat identity.jks | base64 -w 0)
+
+ export DOLLAR='$'
+ echo 'Generating parameter file...'
+ envsubst < './${{ env.offerPath }}/test/scripts/parameters-deploy-template.json' > './${{ env.offerPath }}/test/parameters-deploy-${{ github.job }}.json'
+
+ # Accept Image Terms
+ echo 'Accepting terms for image: $image'
+ rawUrn='$image'
+ publisherAndName=\$(echo \${rawUrn} | grep -o ';.*:' | sed 's/;//g')
+ imageVersion=\${rawUrn##*;}
+ az vm image terms accept --urn \${publisherAndName}$sku:\${imageVersion}
+
+ # Deploy WebLogic Server Dynamic Cluster
+ echo 'Deploying WebLogic Server Dynamic Cluster...'
+ echo 'current path is: $(pwd)'
+ echo 'artifactName is: ${{ needs.preflight.outputs.artifactName }}'
+ az deployment group create \
+ --verbose \
+ --resource-group '$resourceGroup' \
+ --name wls-dycluster-node \
+ --parameters @'${{ env.offerPath }}/test/parameters-deploy-${{ github.job }}.json' \
+ --template-file '${{needs.preflight.outputs.artifactName}}/mainTemplate.json'
+
+ # Get admin VM name
+ adminVMName=\$(az deployment group show \
+ --resource-group '$resourceGroup' \
+ --name wls-dycluster-node \
+ --query 'properties.outputs.adminVMName.value' -o tsv)
+
+ # Verify wls admin services
+ echo 'Verifying WebLogic admin services...'
+ message=\$(az vm run-command invoke \
+ --resource-group '$resourceGroup' \
+ --name \$adminVMName \
+ --command-id RunShellScript \
+ --scripts @'${{ env.offerPath }}/test/scripts/verify-admin-services.sh' \
+ --query value[*].message)
+
+ if [[ \$message == *'not in active (running) state'* ]]; then
+ echo 'Error: \$message'
+ exit 1
+ fi
+
+ # Verify wls managed server services
+ echo 'Verifying WebLogic managed server services...'
+ managedServerVMNamePrefix=\$(az deployment group show \
+ --resource-group '$resourceGroup' \
+ --name wls-dycluster-node \
+ --query 'properties.outputs.managedServerVMNamePrefix.value' -o tsv)
+
+ managedServer1=\"\${managedServerVMNamePrefix}1\"
+ message=\$(az vm run-command invoke \
+ --resource-group '$resourceGroup' \
+ --name \$managedServer1 \
+ --command-id RunShellScript \
+ --scripts @'${{ env.offerPath }}/test/scripts/verify-node-services.sh' \
+ --query value[*].message)
+
+ if [[ \$message == *'not in active (running) state'* ]]; then
+ echo 'Error: \$message'
+ exit 1
+ fi
+
+ # Get public IP
+ publicIP=\$(az vm show \
+ --resource-group '$resourceGroup' \
+ --name \$adminVMName -d \
+ --query publicIps -o tsv)
+
+ # Verify WebLogic Server Access
+ echo 'Verifying WebLogic Server Access...'
+ bash '${{ env.offerPath }}/test/scripts/verify-wls-access.sh' <<< \"\$publicIP ${adminConsolePort} $wlsUserName $wlsPassword $managedServers\"
+
+ # Verify WebLogic Managed Server LifeCycle
+ echo 'Verifying WebLogic managed server lifecycle...'
+ bash '${{ env.offerPath }}/test/scripts/verify-servers-lifecycle.sh' <<< \"$wlsUserName $wlsPassword \$publicIP ${adminConsolePort} $managedServers\"
+
+ # Query admin VM DNS
+ echo 'Querying admin VM DNS...'
+ adminNicId=\$(az vm show \
+ --resource-group '$resourceGroup' \
+ --name \$adminVMName \
+ --query networkProfile.networkInterfaces[0].id -o tsv)
+ adminPublicIPId=\$(az network nic show --id \${adminNicId} --query ipConfigurations[0].publicIPAddress.id -o tsv)
+ adminVMDNS=\$(az network public-ip show \
+ --id \"\${adminPublicIPId}\" \
+ --query dnsSettings.fqdn -o tsv)
+
+ # Deploy WebLogicCafe app
+ echo 'Deploying WebLogicCafe app...'
+ timeout 6m sh -c 'until nc -zv \$0 \$1; do echo \"nc rc: \$?\"; sleep 5; done' \${adminVMDNS} ${adminConsolePort}
+ bash '${{ env.offerPath }}/test/scripts/deploy-webapp.sh' <<< \"$wlsUserName $wlsPassword \${adminVMDNS} ${adminConsolePort}\"
+
+ # Query OHS Access URL
+ echo 'Querying OHS Access URL...'
+ ohsAccessURL=\$(az deployment group show \
+ --resource-group '$resourceGroup' \
+ --name wls-dycluster-node \
+ --query 'properties.outputs.ohsAccessURL.value' -o tsv)
+
+ # Verify WebLogicCafe app deployment
+ echo 'Verifying WebLogicCafe app deployment...'
+ bash '${{ env.offerPath }}/test/scripts/verify-webapp-deployment.sh' <<< \"\${ohsAccessURL}\"
+
+ echo 'SUCCESS: All verification steps passed!'
+ exit 0
+ "; then
+ echo "✅ SUCCESS: WebLogic deployment succeeded with image: $image"
+ echo "successful_image=$image" >> $GITHUB_ENV
+ echo "successful_resource_group=$resourceGroup" >> $GITHUB_ENV
+ success=true
+
+ # Clean up successful deployment
+ echo "Cleaning up resource group: $resourceGroup"
+ az group delete --yes --no-wait --verbose --name "$resourceGroup" || true
+
+ break
+ else
+ echo "❌ FAILED: WebLogic deployment failed with image: $image, trying next..."
+ # Clean up failed deployment (was commented out, contradicting the echo below
+ # and the cluster workflow's identical branch; leftover groups accumulate cost)
+ az group delete --yes --no-wait --verbose --name "$resourceGroup" || true
+ fi
+ echo "::endgroup::"
+ done
+
+ if [ "$success" = "false" ]; then
+ echo "💥 All images failed!"
+ exit 1
+ else
+ echo "🎉 Workflow succeeded with image: ${{ env.successful_image }}"
+ fi
+
+
+ cleanup-github-resource:
+ needs: deploy-weblogic-cluster
+ if: always() # run even when deployment fails, so the temporary branch is removed
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout ${{env.repoName}}
+ uses: actions/checkout@v2
+ with:
+ repository: ${{ env.repoOwner }}/${{env.repoName}}
+ path: ${{env.repoName}}
+ - name: Delete testing branch
+ run: |
+ cd ${{env.repoName}}
+ git push https://$gitToken@github.com/${GITHUB_REPOSITORY}.git -f --delete $testbranchName # remove the cicd-<run_id>-<run_number> branch created by preflight
+
+ cleanup-az-resource:
+ if: always()
+ needs: deploy-weblogic-cluster
+ runs-on: ubuntu-latest
+ steps:
+ - uses: azure/login@v1
+ id: azure-login
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Delete DB Resource Group
+ id: delete-db-resource-group
+ run: |
+ echo "delete resource group: ${{ env.resourceGroupForDependency }}"
+ az group delete --yes --no-wait --verbose --name ${{ env.resourceGroupForDependency }}
+ - name: Delete Resource Group
+ id: delete-resource-group
+ run: |
+ echo "delete resource group with prefix:" ${{ env.resourceGroupPrefix }}
+ az group list --query "[?starts_with(name, '${{ env.resourceGroupPrefix }}')].[name]" -o tsv | xargs -I {} az group delete --name {} --yes --no-wait
+
+ summary:
+ needs: deploy-weblogic-cluster
+ if: always()
+ runs-on: ubuntu-latest
+ steps:
+ - name: summarize jobs
+ if: ${{ github.repository_owner == 'wls-eng' }}
+ run: |
+ workflow_jobs=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/runs/${{ github.run_id }}/jobs)
+ critical_job_num=$(echo $workflow_jobs | jq '.jobs | map(select(.name|test("^deploy-weblogic-cluster$"))) | length')
+ echo "$critical_job_num"
+ succeed_critical_job_num=$(echo $workflow_jobs | jq '.jobs | map(select(.conclusion=="success") | select(.name|test("^deploy-weblogic-cluster$"))) | length')
+ echo "$succeed_critical_job_num"
+ failed_job_num="$(($critical_job_num-$succeed_critical_job_num))"
+ echo $failed_job_num
+ if (($failed_job_num >= 1));then
+ echo "job failed, send notification to Teams"
+ curl ${{ secrets.MSTEAMS_WEBHOOK }} \
+ -H 'Content-Type: application/json' \
+ --data-binary @- << EOF
+ {
+ "@context":"http://schema.org/extensions",
+ "@type":"MessageCard",
+ "text":"$failed_job_num job failed in Dynamic Cluster Offer's workflow, please take a look at: https://github.com/${GITHUB_REPOSITORY}/actions/runs/${{ github.run_id }}"
+ }
+ EOF
+ fi
diff --git a/README.md b/README.md
index 3d6d7fcf9..f78551fdc 100644
--- a/README.md
+++ b/README.md
@@ -2,16 +2,249 @@
As part of a broad-ranging partnership between Oracle and Microsoft, this project offers support for running Oracle WebLogic Server in the Azure Virtual Machines and Azure Kubernetes Service (AKS). The partnership includes joint support for a range of Oracle software running on Azure, including Oracle WebLogic, Oracle Linux, and Oracle DB, as well as interoperability between Oracle Cloud Infrastructure (OCI) and Azure.
+## Integration tests report
+* [](https://github.com/oracle/weblogic-azure/actions/workflows/it-validation-build.yaml)
+* [](https://github.com/oracle/weblogic-azure/actions/workflows/it-validation-aks.yaml)
+* [](https://github.com/oracle/weblogic-azure/actions/workflows/it-validation-vm-admin.yaml)
+* [](https://github.com/oracle/weblogic-azure/actions/workflows/it-validation-vm-cluster.yaml)
+* [](https://github.com/oracle/weblogic-azure/actions/workflows/it-validation-vm-dynamic-cluster.yaml)
+
## Installation
The [Azure Marketplace WebLogic Server Offering](https://azuremarketplace.microsoft.com/en-us/marketplace/apps?search=WebLogic) offers a simplified UI and installation experience over the full power of the Azure Resource Manager (ARM) template.
## Documentation
-Please refer to the README for [documentation on WebLogic Server running on an Azure Kubernetes Service](https://github.com/oracle/weblogic-azure/weblogic-azure-aks/README.md)
+Please refer to the README for [documentation on WebLogic Server running on an Azure Kubernetes Service](https://oracle.github.io/weblogic-kubernetes-operator/userguide/aks/)
Please refer to the README for [documentation on WebLogic Server running on an Azure Virtual Machine](https://docs.oracle.com/en/middleware/standalone/weblogic-server/wlazu/get-started-oracle-weblogic-server-microsoft-azure-iaas.html#GUID-E0B24A45-F496-4509-858E-103F5EBF67A7)
+## Local Build Setup and Requirements
+
+This project utilizes [GitHub Packages](https://github.com/features/packages) for hosting and retrieving some dependencies. To ensure you can smoothly run and build the project in your local environment, specific configuration settings are required.
+
+GitHub Packages requires authentication to download or publish packages. Therefore, you need to configure your Maven `settings.xml` file to authenticate using your GitHub credentials. The primary reason for this is that GitHub Packages does not support anonymous access, even for public packages.
+
+Please follow these steps:
+
+1. Create a Personal Access Token (PAT)
+ - Go to [Personal access tokens](https://github.com/settings/tokens).
+ - Click on Generate new token.
+ - Give your token a descriptive name, set the expiration as needed, and select the scopes (read:packages, write:packages).
+ - Click Generate token and make sure to copy the token.
+
+2. Configure Maven Settings
+ - Locate or create the settings.xml file in your .m2 directory(~/.m2/settings.xml).
+ - Add the GitHub Package Registry server configuration with your username and the PAT you just created. It should look something like this:
+ ```xml
+
+
+
+
+
+
+ github
+ YOUR_GITHUB_USERNAME
+ YOUR_PERSONAL_ACCESS_TOKEN
+
+
+
+
+
+
+ ```
+
+
+## Deployment Description
+
+### WLS on VMs
+
+#### Oracle WebLogic Server Single Node
+
+The offer provisions the following Azure resources based on Oracle WebLogic Server base images and an Oracle WebLogic Server Enterprise Edition (WLS) without domain configuration.
+
+- The offer includes a choice of operating system, JDK, Oracle WebLogic Server versions.
+ - OS: Oracle Linux or Red Hat Enterprise Linux
+ - JDK: Oracle JDK 8, or 11
+ - WLS version: 12.2.1.4, 14.1.1.0
+- Computing resources
+ - A VM with the following configurations:
+ - Operating system as described in the selected base image.
+ - Choice of VM size.
+ - An OS disk attached to the VM.
+- Network resources
+ - A virtual network and a subnet.
+ - A network security group.
+ - A network interface.
+ - A public IP address assigned to the network interface.
+- Storage resources
+ - An Azure Storage Account to store the VM diagnostics profile.
+- Key Software components
+ - Oracle WebLogic Server Enterprise Edition. Version as described in the selected base image. The **ORACLE_HOME** is **/u01/app/wls/install/oracle/middleware/oracle_home**.
+ - Oracle JDK. The version as described in the selected base image. The **JAVA_HOME** is **/u01/app/jdk/jdk-${version}**.
+ - In addition to the database drivers that come standard with WLS, the offer includes the most recent supported PostgreSQL JDBC driver and Microsoft SQL JDBC driver. The drivers are stored in **/u01/app/wls/install/oracle/middleware/oracle_home/wlserver/server/lib/**.
+
+#### Oracle WebLogic Server with Admin Server
+
+The offer provisions Oracle WebLogic Server (WLS) with a domain and Administration Server. All supporting Azure resources are automatically provisioned.
+
+- The offer includes a choice of operating system, JDK, Oracle WLS versions.
+ - OS: Oracle Linux or Red Hat Enterprise Linux
+ - JDK: Oracle JDK 8, or 11
+ - WLS version: 12.2.1.4, 14.1.1.0
+- Computing resources
+ - VM with the following configuration:
+ - A VM to run the Administration Server.
+ - Choice of VM size.
+ - An OS disk attached to the VM.
+- Network resources
+ - A virtual network and a subnet. If desired, you can deploy into a pre-existing virtual network.
+ - A network security group if creating a new virtual network.
+ - Network interface for VM.
+ - Public IP address.
+- Key software components
+ - Oracle WLS Enterprise Edition. Version as described in the selected base image. The **ORACLE_HOME** is **/u01/app/wls/install/oracle/middleware/oracle_home**.
+ - Oracle JDK. The version as described in the selected base image. The **JAVA_HOME** is **/u01/app/jdk/jdk-${version}**.
+ - A WLS domain with the Administration Server up and running. Admin server sign in with the user name and password provided to the offer. The default domain name is **adminDomain**, the domain path is **/u01/domains/adminDomain/**.
+- Database connectivity
+ - The offer provides database connectivity for PostgreSQL, Oracle database, Azure SQL, MySQL, or an arbitrary JDBC compliant database.
+ - Some database options support Azure Passwordless database connection.
+- Access URLs
+ - See the deployment outputs for access URLs.
+
+#### Oracle WebLogic Server Cluster
+
+The offer provisions Oracle WebLogic Server (WLS) Enterprise Edition with a domain, the Administration Server and a configured cluster. All supporting Azure resources are automatically provisioned.
+
+- The offer includes a choice of operating system, JDK, WLS versions.
+ - OS: Oracle Linux or Red Hat Enterprise Linux
+ - JDK: Oracle JDK 8, or 11
+ - WLS version: 12.2.1.4, 14.1.1.0
+- Computing resources
+ - VMs with the following configurations:
+ - A VM to run the Administration Server and VMs to run Managed Servers.
+ - VMs to run Coherence Cache servers.
+ - Choice of VM size.
+ - An OS disk attached to the VM.
+- Load Balancer
+ - If desired, an Azure Application Gateway (agw). The TLS/SSL certificate for the agw can be uploaded, retrieved from a key vault, or self-signed auto-generated.
+- Network resources
+ - A virtual network and a subnet. If desired, you can deploy into a pre-existing virtual network.
+ - A network security group if creating a new virtual network.
+ - Network interfaces for VMs.
+ - Public IP addresses assigned to the network interfaces.
+ - Public IP assigned for agw, if desired.
+- High Availability
+ - An Azure Availability Set for the VMs.
+- Key software components
+ - WLS Enterprise Edition. Version as described in the selected base image. The **ORACLE_HOME** is **/u01/app/wls/install/oracle/middleware/oracle_home**.
+ - Oracle JDK. The version as described in the selected base image. The **JAVA_HOME** is **/u01/app/jdk/jdk-${version}**.
+ - A WLS domain with the Administration Server up and running. Admin server sign in with the user name and password provided to the offer. The default domain name is **wlsd**, the domain path is **/u01/domains/wlsd/**.
+ - A configured cluster with Managed Servers running. The number of managed servers is specified in the UI when deploying the offer.
+ - Coherence Cache. If you select to enable Coherence Cache, the offer creates a data tier configured with Managed Coherence cache servers.
+- Database connectivity
+ - The offer provides database connectivity for PostgreSQL, Oracle database, Azure SQL, MySQL, or an arbitrary JDBC compliant database.
+ - Some database options support Azure Passwordless database connection.
+- Access URLs
+ - See the deployment outputs for access URLs.
+
+#### Oracle WebLogic Server Dynamic Cluster
+
+The offer provisions Oracle WebLogic Server (WLS) Enterprise Edition with a domain, the Administration Server and a dynamic cluster. All supporting Azure resources are automatically provisioned.
+
+- The offer includes a choice of operating system, JDK, WLS versions.
+ - OS: Oracle Linux or Red Hat Enterprise Linux
+ - JDK: Oracle JDK 8, or 11
+ - WLS version: 12.2.1.4, 14.1.1.0
+- The offer includes the choice of the following Oracle HTTP Server (OHS) base images
+ - OS: Oracle Linux
+ - OHS version 12.2.1.4.0
+- Computing resources
+ - VMs for WLS:
+ - A VM to run the Administration Server and VMs to run Managed Servers.
+ - VMs to run Coherence Cache servers.
+ - Choice of VM size.
+ - An OS disk attached to the VM.
+ - VM for OHS, if desired:
+ - Choice of VM size.
+ - An OS disk attached to the VM.
+- Load Balancer
+ - If desired, an OHS. The TLS/SSL certificate for the OHS can be uploaded, or retrieved from a key vault.
+- Network resources
+ - A virtual network and a subnet. If desired, you can deploy into a pre-existing virtual network.
+ - A network security group if creating a new virtual network.
+ - Network interfaces for VMs.
+ - Public IP addresses assigned to the network interfaces.
+ - A public IP assigned to OHS, if desired.
+- Storage resources
+ - An Azure Storage Account and a file share named **wlsshare**. The mount point is **/mnt/wlsshare**.
+ - The storage account is also used to store the diagnostics profile of the VMs.
+ - A private endpoint in the same subnet with the VM, which allows the VM to access the file share.
+- Key software components for WLS
+ - WLS Enterprise Edition. Version as described in the selected base image. The **ORACLE_HOME** is **/u01/app/wls/install/oracle/middleware/oracle_home**.
+ - Oracle JDK. The version as described in the selected base image. The **JAVA_HOME** is **/u01/app/jdk/jdk-${version}**.
+ - A WLS domain with the Administration Server up and running. Admin server sign in with the user name and password provided to the offer. The default domain name is **wlsd**, the domain path is **/u01/domains/wlsd/**.
+ - A dynamic cluster with desired number of Managed Servers running. The number of Managed servers is specified by **Initial Dynamic Cluster Size**. The cluster size is specified by **Maximum Dynamic Cluster Size**.
+ - Coherence Cache. If you select to enable Coherence Cache, the offer creates a data tier configured with Managed Coherence cache servers.
+- Key software components for OHS
+ - Version as described in the selected base image. The **ORACLE_HOME** is **/u01/app/ohs/install/oracle/middleware/oracle_home**.
+ - Oracle JDK. The version as described in the selected base image. The **JAVA_HOME** is **/u01/app/jdk/jdk-${version}**.
+ - A domain is configured based on the node manager user name and credentials provided by the user. The default domain name is **ohsStandaloneDomain**, the domain path is **/u01/domains/ohsStandaloneDomain/**.
+ - An Oracle HTTP Server Component with default name **ohs_component**.
+- Database connectivity
+ - The offer provides database connectivity for PostgreSQL, Oracle database, Azure SQL, MySQL, or an arbitrary JDBC compliant database.
+ - Some database options support Azure Passwordless database connection.
+- Access URLs
+ - See the deployment outputs for access URLs.
+
+### WLS on AKS
+
+The offer provisions an Oracle WebLogic Server Enterprise Edition (WLS) and supporting Azure resources. WLS is configured with a domain, the Administration Server and a dynamic cluster set up and running.
+
+- The offer includes the choice of the following WLS container images
+ - Images from Oracle Container Registry (OCR) (General or Patched images)
+ - OS: Oracle Linux or Red Hat Enterprise Linux
+ - JDK: Oracle JDK 8, or 11
+ - WLS version: 12.2.1.4, 14.1.1.0
+ - You can specify any arbitrary docker image tag that is available from OCR.
+ - An image from your own Azure Container Registry.
+- Computing resources
+ - Azure Kubernetes Service cluster
+ - Dynamically created AKS cluster with
+ - Choice of Node count.
+ - Choice of Node size.
+ - Network plugin: Azure CNI.
+ - If desired, you can also deploy into a pre-existing AKS cluster.
+ - An Azure Container Registry. If desired, you can select a pre-existing Azure Container Registry.
+- Network resources
+ - A virtual network and a subnet. If desired, you can deploy into a pre-existing virtual network.
+ - Public IP addresses assigned to the managed load balancer and Azure Application Gateway, if selected.
+- Load Balancer
+ - Choice of Azure Application Gateway (agw) or standard load balancer service. With agw, you can upload a TLS/SSL certificate, use a certificate stored in a key vault, or allow a self-signed certificate to be generated and installed.
+- Storage resources
+ - An Azure Storage Account and a file share named weblogic if you select to create Persistent Volume using Azure File share service. The mount point is **/shared**.
+- Monitoring resources
+ - If desired, Azure Container Insights and workspace.
+- Key software components
+ - Oracle WebLogic Server Enterprise Edition. The **ORACLE_HOME** is **/u01/app/wls/install/oracle/middleware/oracle_home**.
+ - This offer always deploys WLS using the 'Model in image' domain home source type. For more information, see the documentation from Oracle.
+ - WebLogic Kubernetes Operator
+ - Oracle JDK. The **JAVA_HOME** is **/u01/app/jdk/jdk-${version}**.
+ - A WLS domain with the Administration Server up and running, configured based on the provided Administrator user name and credentials. The default domain name is sample-domain1, the domain path is **/u01/domains/sample-domain1/**.
+ - A dynamic cluster with Managed Servers running. The number of initial and maximum number of Managed Servers are configurable.
+- Database connectivity
+ - The offer provides database connectivity for PostgreSQL, Oracle database, Azure SQL, MySQL, or an arbitrary JDBC compliant database.
+ - Some database options support Azure Passwordless database connection.
+- Access URLs
+ - See the deployment outputs for access URLs.
+
## Examples
To get details of how to run Oracle WebLogic Server on Azure Virtual Machines refer to the blog [WebLogic on Azure Virtual Machines Major Release Now Available](https://blogs.oracle.com/weblogicserver/weblogic-on-azure-virtual-machines-major-release-now-available).
diff --git a/SECURITY.md b/SECURITY.md
index 3c4ad917a..2ca81027f 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -21,9 +21,7 @@ security features are welcome on GitHub Issues.
Security updates will be released on a regular cadence. Many of our projects
will typically release security fixes in conjunction with the
-[Oracle Critical Patch Update][3] program. Security updates are released on the
-Tuesday closest to the 17th day of January, April, July and October. A pre-release
-announcement will be published on the Thursday preceding each release. Additional
+Oracle Critical Patch Update program. Additional
information, including past advisories, is available on our [security alerts][4]
page.
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 000000000..2b3b164cc
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,97 @@
+
+
+
+
+ 4.0.0
+
+
+ com.microsoft.azure.iaas
+ azure-javaee-iaas-parent
+ 1.0.22
+
+
+ com.oracle.weblogic.azure
+ weblogic-azure
+ pom
+ 1.0.0
+ ${project.artifactId}
+ https://github.com/oracle/weblogic-azure
+
+
+
+ github
+ GitHub Packages
+ https://maven.pkg.github.com/azure-javaee/azure-javaee-iaas
+
+
+
+
+
+ github
+ GitHub Packages
+ https://maven.pkg.github.com/azure-javaee/azure-javaee-iaas
+
+
+
+
+
+
+ 1.0.93
+
+ 1.0.32
+ 1.0.58
+ 1.0.740000
+ 1.0.55
+
+ 1.0.7
+ 1.0.3
+ 1.0.3
+
+ 1.0.28
+ 1.0.3
+ 1.0.3
+
+
+
+ main
+
+ weblogic-azure
+
+ oracle
+
+ https://raw.githubusercontent.com/${git.repo.owner}/${git.repo}/${git.tag}/weblogic-azure-vm
+
+ ${project.basedir}
+
+ ${module.basedir}/../arm-ttk/arm-ttk
+
+ file:///${module.basedir}/resources/azure-common.properties
+ file:///${module.basedir}/resources/pid.properties
+ file:///${module.basedir}/resources/microsoft-pid.properties
+
+
+
+
+ true
+
+ true
+
+ true
+
+
+
+
+
+ weblogic-azure-aks
+ weblogic-azure-vm
+
+
+
diff --git a/resources/azure-common.properties b/resources/azure-common.properties
new file mode 100644
index 000000000..7eb83b106
--- /dev/null
+++ b/resources/azure-common.properties
@@ -0,0 +1,85 @@
+## Use the following command to get the latest 10 API versions for a given resource type
+## ```bash
+## export NameSpace=
+## export ResourceType=
+## az provider show --namespace ${NameSpace} --query "resourceTypes[?resourceType=='${ResourceType}'].apiVersions[:10]" \
+## | jq -r '.[][] | select(test("preview$"; "i") | not)' | head -n 10
+## ```
+##
+
+# Microsoft.Authorization/roleAssignments
+azure.apiVersionForRoleAssignment=2022-04-01
+# Microsoft.Authorization/roleDefinitions
+azure.apiVersionForRoleDefinitions=2022-04-01
+# Microsoft.ContainerRegistry/registries
+azure.apiVersionForContainerRegistries=2023-07-01
+# Microsoft.ContainerService/managedClusters
+azure.apiVersionForManagedClusters=2023-10-01
+# Microsoft.Compute/availabilitySets
+azure.apiVersionForAvailabilitySets=2024-11-01
+# Microsoft.Compute/virtualMachines
+azure.apiVersionForVirtualMachines=2024-11-01
+# Microsoft.KeyVault/vaults
+azure.apiVersionForKeyVault=2024-11-01
+# Microsoft.KeyVault/vaults/secrets
+azure.apiVersionForKeyVaultSecrets=2024-11-01
+# Microsoft.ManagedIdentity/userAssignedIdentities
+azure.apiVersionForIdentity=2023-01-31
+# Microsoft.Network/networkInterfaces
+azure.apiVersionForNetworkInterfaces=2023-06-01
+# Microsoft.Network/networkSecurityGroups
+azure.apiVersionForNetworkSecurityGroups=2023-06-01
+# Microsoft.Network/privateEndpoints
+azure.apiVersionForPrivateEndpoint=2023-06-01
+# Microsoft.Network/publicIPAddresses
+azure.apiVersionForPublicIPAddresses=2023-06-01
+# Microsoft.Network/applicationGateways
+azure.apiVersionForApplicationGateways=2023-06-01
+# Microsoft.Network/dnszones
+azure.apiVersionForDNSZone=2023-07-01-preview
+# Microsoft.Network/virtualNetworks
+azure.apiVersionForVirtualNetworks=2023-06-01
+# Microsoft.OperationalInsights/workspaces
+azure.apiVersionForInsightsWorkspaces=2022-10-01
+# Microsoft.Resources/deploymentScripts
+azure.apiVersionForDeploymentScript=2023-08-01
+# Microsoft.Resources/deployments
+azure.apiVersionForDeployment=2023-07-01
+# Microsoft.Resources/tags
+azure.apiVersionForTags=2023-07-01
+# Microsoft.Storage/storageAccounts
+azure.apiVersionForStorage=2025-01-01
+# Microsoft.Storage/storageAccounts/fileServices
+azure.apiVersionForStorageFileService=2025-01-01
+# Microsoft.Monitor/accounts
+azure.apiVersionForMonitorAccount=2023-04-03
+
+# AzureAzCLI version
+azure.cli.version=2.53.0
+# AzurePowerShell version
+azure.powershell.version=11.5
+
+# These filters are used to implement tags for resources. Other occurrences of these resource type identifiers
+# are intentionally not filtered because doing so would unnecessarily complicate the code.
+identifier.accounts=Microsoft.Monitor/accounts
+identifier.applicationGateways=Microsoft.Network/applicationGateways
+identifier.availabilitySets=Microsoft.Compute/availabilitySets
+identifier.dnszones=Microsoft.Network/dnszones
+identifier.managedClusters=Microsoft.ContainerService/managedClusters
+identifier.networkInterfaces=Microsoft.Network/networkInterfaces
+identifier.networkSecurityGroups=Microsoft.Network/networkSecurityGroups
+identifier.publicIPAddresses=Microsoft.Network/publicIPAddresses
+identifier.privateEndpoints=Microsoft.Network/privateEndpoints
+identifier.registries=Microsoft.ContainerRegistry/registries
+identifier.storageAccounts=Microsoft.Storage/storageAccounts
+identifier.vaults=Microsoft.KeyVault/vaults
+identifier.virtualNetworks=Microsoft.Network/virtualNetworks
+identifier.virtualMachines=Microsoft.Compute/virtualMachines
+identifier.virtualMachinesExtensions=Virtual machine extension
+identifier.workspaces=Microsoft.OperationalInsights/workspaces
+identifier.deploymentScripts=Microsoft.Resources/deploymentScripts
+identifier.userAssignedIdentities=Microsoft.ManagedIdentity/userAssignedIdentities
+identifier.resourcesDeployment=Microsoft resources deployment
+label.tagsLabel=Tags for the resources.
+
+azure.armBased.vmSize.list="Standard_D2plds_v5","Standard_D4plds_v5","Standard_D8plds_v5","Standard_D16plds_v5","Standard_D32plds_v5","Standard_D48plds_v5","Standard_D64plds_v5","Standard_D2pls_v5","Standard_D4pls_v5","Standard_D8pls_v5","Standard_D16pls_v5","Standard_D32pls_v5","Standard_D48pls_v5","Standard_D64pls_v5","Standard_D2pds_v5","Standard_D4pds_v5","Standard_D8pds_v5","Standard_D16pds_v5","Standard_D32pds_v5","Standard_D48pds_v5","Standard_D64pds_v5","Standard_D2ps_v5","Standard_D4ps_v5","Standard_D8ps_v5","Standard_D16ps_v5","Standard_D32ps_v5","Standard_D48ps_v5","Standard_D64ps_v5","Standard_E2pds_v5","Standard_E4pds_v5","Standard_E8pds_v5","Standard_E16pds_v5","Standard_E20pds_v5","Standard_E32pds_v5","Standard_E2ps_v5","Standard_E4ps_v5","Standard_E8ps_v5","Standard_E16ps_v5","Standard_E20ps_v5","Standard_E32ps_v5","Standard_B2pls_v2","Standard_B2ps_v2","Standard_B2pts_v2","Standard_B4pls_v2","Standard_B4ps_v2","Standard_B8pls_v2","Standard_B8ps_v2","Standard_B16pls_v2","Standard_B16ps_v2","Standard_D2pls_v6","Standard_D4pls_v6","Standard_D8pls_v6","Standard_D16pls_v6","Standard_D32pls_v6","Standard_D48pls_v6","Standard_D64pls_v6","Standard_D96pls_v6","Standard_D2pds_v6","Standard_D4pds_v6","Standard_D8pds_v6","Standard_D16pds_v6","Standard_D32pds_v6","Standard_D48pds_v6","Standard_D64pds_v6","Standard_D96pds_v6","Standard_D2plds_v6","Standard_D4plds_v6","Standard_D8plds_v6","Standard_D16plds_v6","Standard_D32plds_v6","Standard_D48plds_v6","Standard_D64plds_v6","Standard_D96plds_v6","Standard_D2ps_v6","Standard_D4ps_v6","Standard_D8ps_v6","Standard_D16ps_v6","Standard_D32ps_v6","Standard_D48ps_v6","Standard_D64ps_v6","Standard_D96ps_v6","Standard_E2ps_v6","Standard_E4ps_v6","Standard_E8ps_v6","Standard_E16ps_v6","Standard_E32ps_v6","Standard_E48ps_v6","Standard_E64ps_v6","Standard_E96ps_v6","Standard_E2pds_v6","Standard_E4pds_v6","Standard_E8pds_v6","Standard_E16pds_v6","Standard_E32pds_v6","Standard_E48pds_v6","Standard_E64pds_v6","Standard_E96pds_
v6"
diff --git a/resources/doc/guidance-for-tagging-resource.md b/resources/doc/guidance-for-tagging-resource.md
new file mode 100644
index 000000000..ab2ec4c1c
--- /dev/null
+++ b/resources/doc/guidance-for-tagging-resource.md
@@ -0,0 +1,201 @@
+
+# Guidance on Applying Tags in Solution Templates
+
+## What are Tags in this context and why are they useful?
+
+Tags are arbitrary name=value pairs that can be associated with most Azure resources. Azure features such as Azure Policy can use Tags to enforce cloud governance policies. For more about tags, see [Use tags to organize your Azure resources and management hierarchy](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/tag-resources).
+
+## Step 1: Audit Resources Created in the Offer
+
+To determine the resources that will be created in your offer, use the following commands based on the template type:
+
+### For ARM Templates:
+Use the command below to list resource types:
+
+```bash
+# Navigate to the offer folder
+cd offer-folder
+grep -rh "\"type\": \"Microsoft" --exclude="createUiDefinition.json" | sort | uniq | sed 's/^[ \t]*//'
+```
+
+### For Bicep Templates:
+Use the command below to list resource types and remove duplicates:
+
+```bash
+# Navigate to the offer folder
+cd offer-folder
+grep -rh "^resource" | grep "Microsoft." | sort | uniq | sed 's/^[ \t]*//'
+```
+
+Identify which resources support tags and which do not. For resources not listed below, consult the ARM definition at [Azure Resource Manager templates](https://learn.microsoft.com/en-us/azure/templates/) to determine if tagging is supported. If the definition does not include a tags property, the resource does not support tags and tagging is not required for deployments.
+
+### Resources that Support Tags:
+
+The top-level resources will be listed in the Tag UI control. Sub-resources will inherit the same tags as their parent resources.
+
+For example, in the UI definition, customers can specify tags for `Microsoft.KeyVault/vaults`, but not for `Microsoft.KeyVault/vaults/secrets`. For the deployment of `Microsoft.KeyVault/vaults/secrets`, the same tags applied to `Microsoft.KeyVault/vaults` will be used. This approach ensures a consistent tagging experience with Key Vault deployments in the Azure portal.
+
+- Microsoft.Network/dnszones
+- Microsoft.Network/networkInterfaces
+- Microsoft.Network/networkSecurityGroups
+- Microsoft.Network/publicIPAddresses
+- Microsoft.Network/privateEndpoints
+- Microsoft.Storage/storageAccounts
+- Microsoft.KeyVault/vaults
+ - Microsoft.KeyVault/vaults/secrets
+- Microsoft.Network/virtualNetworks
+- Microsoft.Compute/virtualMachines
+- Microsoft.Compute/virtualMachines/extensions
+- Microsoft.Resources/deploymentScripts
+- Microsoft.ManagedIdentity/userAssignedIdentities
+- Microsoft.Resources/deployments
+- Microsoft.Network/applicationGateways
+
+### Resources that Do Not Support Tags:
+
+- Microsoft.Storage/storageAccounts/fileServices
+- Microsoft.Storage/storageAccounts/fileServices/shares
+- Microsoft.Network/networkSecurityGroups/securityRules
+- Microsoft.Network/dnsZones/A
+- Microsoft.Network/dnszones/CNAME
+- Microsoft.Network/virtualNetworks/subnets
+- Microsoft.Authorization/roleAssignments
+- Microsoft.Network/loadBalancers/backendAddressPools
+- Microsoft.Network/applicationGateways/backendHttpSettingsCollection
+- Microsoft.Network/applicationGateways/frontendIPConfigurations
+- Microsoft.Network/applicationGateways/frontendPorts
+- Microsoft.Network/applicationGateways/gatewayIPConfigurations
+- Microsoft.Network/applicationGateways/httpListeners
+- Microsoft.Network/applicationGateways/probes
+- Microsoft.Network/applicationGateways/requestRoutingRules
+
+## Step 2: Tag UI Control
+
+Incorporate the [Microsoft.Common.TagsByResource UI element](https://learn.microsoft.com/en-us/azure/azure-resource-manager/managed-applications/microsoft-common-tagsbyresource?WT.mc_id=Portal-Microsoft_Azure_CreateUIDef0) to include resources that support tags.
+
+## Step 3: Update the Template
+
+Refer to this [pull request](https://github.com/oracle/weblogic-azure/pull/327/) as a guide for how to apply tags to the resource deployments.
+
+Notes:
+
+For AKS, make sure the tag is applied to agent pool and node pool. The whole structure looks like:
+
+```bicep
+resource symbolicname 'Microsoft.ContainerService/managedClusters@2024-06-02-preview' = {
+ name: 'string'
+ location: 'string'
+ tags: {
+ tagName1: 'tagValue1'
+ tagName2: 'tagValue2'
+ }
+
+ ...
+
+ agentPoolProfiles: {
+
+ ...
+
+ tags: {
+ tagName1: 'tagValue1'
+ tagName2: 'tagValue2'
+ }
+ }
+}
+
+```
+
+See [Microsoft.ContainerService managedClusters - Bicep](https://learn.microsoft.com/en-us/azure/templates/microsoft.containerservice/managedclusters?pivots=deployment-language-bicep) for more information.
+
+## Step 4: Testing
+
+1. **Create a Test Offer:** Set up a test offer to validate the tagging process.
+
+2. **Tag Settings:**
+ - Apply a uniform tag to all resources.
+ - Create specific tags for each resource, setting the tag value to the resource type (e.g., "tag1=storage account").
+
+3. **Deploy the Offer:**
+
+4. **Verify Tags:** Use the following command to verify that the resources have the correct tags applied:
+
+ ```bash
+ az resource list --resource-group --query "[].{Name:name, Type:type, Tags:tags}" -o json
+ ```
+
+ For example:
+
+ ```shell
+ az resource list --resource-group haiche-sn-tag-test --query "[].{Name:name, Type:type, Tags:tags}" -o json
+ [
+ {
+ "Name": "0733ecolvm",
+ "Tags": {
+ "Tag0": "All",
+ "Tag6": "storage account"
+ },
+ "Type": "Microsoft.Storage/storageAccounts"
+ },
+ {
+ "Name": "olvm_PublicIP",
+ "Tags": {
+ "Tag0": "All",
+ "Tag4": "public ip address"
+ },
+ "Type": "Microsoft.Network/publicIPAddresses"
+ },
+ {
+ "Name": "wls-nsg",
+ "Tags": {
+ "Tag0": "All",
+ "Tag3": "network security group"
+ },
+ "Type": "Microsoft.Network/networkSecurityGroups"
+ },
+ {
+ "Name": "olvm_VNET",
+ "Tags": {
+ "Tag0": "All",
+ "Tag8": "virtual network"
+ },
+ "Type": "Microsoft.Network/virtualNetworks"
+ },
+ {
+ "Name": "olvm_NIC",
+ "Tags": {
+ "Tag0": "All",
+ "Tag2": "network interface"
+ },
+ "Type": "Microsoft.Network/networkInterfaces"
+ },
+ {
+ "Name": "WeblogicServerVM",
+ "Tags": {
+ "Tag0": "All",
+ "Tag7": "virtual machine"
+ },
+ "Type": "Microsoft.Compute/virtualMachines"
+ },
+ {
+ "Name": "WeblogicServerVM_OsDisk_1_d1fed748ccaa4cac81df9179e6dff325",
+ "Tags": {
+ "Tag0": "All",
+ "Tag7": "virtual machine"
+ },
+ "Type": "Microsoft.Compute/disks"
+ }
+ ]
+ ```
+
+## Step 5: Known issues
+
+The tag is not applied to resources that are not deployed through our template, so we cannot apply tags to them from the template.
+
+Known resources:
+
+- Microsoft.Compute/virtualMachines/extensions
+ - OmsAgentForLinux
+ - MDE.Linux
+- Microsoft.AlertsManagement/prometheusRuleGroups
+- Microsoft.Insights/dataCollectionEndpoints
+- Microsoft.Insights/dataCollectionRules
diff --git a/weblogic-azure-vm/arm-oraclelinux-wls/src/main/resources/microsoft-pid.properties b/resources/microsoft-pid.properties
similarity index 61%
rename from weblogic-azure-vm/arm-oraclelinux-wls/src/main/resources/microsoft-pid.properties
rename to resources/microsoft-pid.properties
index 1a630cb37..ab91a07a4 100644
--- a/weblogic-azure-vm/arm-oraclelinux-wls/src/main/resources/microsoft-pid.properties
+++ b/resources/microsoft-pid.properties
@@ -1,22 +1,15 @@
# Copyright (c) 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
-azure.apiVersion=2020-06-01
-azure.apiVersion2=2019-06-01
-azure.apiVersionForDeploymentScript=2020-10-01
-azure.apiVersionForDNSZone=2018-05-01
-azure.apiVersionForIndentity=2018-11-30
-azure.apiVersionForKeyVault=2019-09-01
-
# Values in this file are read at build time for the other Azure Marketplace offer repositories
# This file is for pids used when testing the offers in the Microsoft internal Marketplace account.
-# Pids used in https://github.com/wls-eng/arm-oraclelinux-wls
+# Pids used in https://github.com/oracle/weblogic-azure/tree/main/weblogic-azure-vm/arm-oraclelinux-wls
end=4590f705-3e93-5945-95c3-eeb88b976091
start=7908d405-18b6-5394-988e-fb4cabdbf5e5
-# Pids used in https://github.com/wls-eng/arm-oraclelinux-wls-admin
+# Pids used in https://github.com/oracle/weblogic-azure/tree/main/weblogic-azure-vm/arm-oraclelinux-wls-admin
admin.aad.end=8295df19-fe6b-5745-ad24-51ef66522b24
admin.aad.start=fc7e031d-111d-5a3a-8b5a-e08602237dca
@@ -28,8 +21,12 @@ admin.elk.end=1e26d5dd-cc3f-5308-a858-cf3257a486ca
admin.elk.start=3dc18163-180b-56eb-adf2-501f97c88c7c
admin.end=08e3f14d-2362-5c43-8269-133a0045d223
admin.start=4b4d5bab-1032-530c-88db-ac3f7caf440d
+admin.ssl.end=5a18b3b0-d993-51b6-ac78-dbc87bb7ba65
+admin.ssl.start=2e7d27a3-f61f-54ee-bdfd-89f597d6d70c
+admin.pswless.database.end=7ebb9b3e-3465-5116-99f9-e9e092bb6198
+admin.pswless.database.start=8b595cf2-9481-5466-be25-0d1b832f22b8
-# Pids used in https://github.com/wls-eng/arm-oraclelinux-wls-cluster
+# Pids used in https://github.com/oracle/weblogic-azure/tree/main/weblogic-azure-vm/arm-oraclelinux-wls-cluster
cluster.aad.end=3fa6990d-1fcb-54e3-95b2-1ccd1ec50e37
cluster.aad.start=a714884a-c4a6-5cfd-b014-97e7618b79c1
@@ -37,6 +34,7 @@ cluster.addnode.end=279e1f09-14b3-5569-8f1f-bf2185a4c96a
cluster.addnode.start=4455d9f3-3a4c-54d2-99f8-f727c3f3dda2
cluster.addnode.coherence.end=157eac12-12ae-11eb-adc1-0242ac120002
cluster.addnode.coherence.start=157eacda-12ae-11eb-adc1-0242ac120002
+cluster.appgateway.custom.certificate=031f5ed3-892c-5efb-bd36-6db31717732d
cluster.appgateway.end=36deb858-08fe-5c07-bc77-ba957a59a080
cluster.appgateway.start=391adcc9-6421-5bf8-8960-aec850ef7b0e
cluster.appgateway.keyvault.start=512d14f0-3590-5dcb-ac53-db440d59ff3c
@@ -53,7 +51,11 @@ cluster.dns.end=916943db-498f-59d7-a410-4cf37e9ed1ad
cluster.dns.start=aa030ff6-c680-53de-8891-8dd16ce08aa6
cluster.elk.end=e4165284-b017-5df9-9b91-3f11dd8a72e5
cluster.elk.start=6890699c-97ad-5d76-91d3-3a3b7d64013f
+cluster.ssl.end=63f1d185-d25c-55db-aa31-6d732d445f61
+cluster.ssl.start=eff0c5cb-3417-5745-a20d-cf5455fd5d39
cluster.end=55160205-2f03-52c5-ae30-507952c0c4ea
+cluster.pswless.database.end=ae2f6529-815a-5d13-b6c2-91b92d267d7a
+cluster.pswless.database.start=527a8646-465b-56c7-b495-6383eeb76e2e
cluster.start=ca5e3350-ff62-5d92-83a3-acaaeae87c03
# Pids to indicate which database was chosen. No difference in these
@@ -62,8 +64,9 @@ cluster.start=ca5e3350-ff62-5d92-83a3-acaaeae87c03
database.oracle=692b2d84-72f5-5992-a15d-0d5bcfef040d
database.postgresql=935df06e-a5d5-5bf1-af66-4c1eb71dac7a
database.sqlserver=3569588c-b89d-5567-84ee-a2c633c7204c
+database.mysql=de95ae02-f841-4c48-a69e-4bf09c4271bb
-# Pids used in https://github.com/wls-eng/arm-oraclelinux-wls-dynamic-cluster
+# Pids used in https://github.com/oracle/weblogic-azure/tree/main/weblogic-azure-vm/arm-oraclelinux-wls-dynamic-cluster
dynamic.aad.end=6449f9a2-0713-5a81-a886-dce6d8d5c137
dynamic.aad.start=6245e080-ab9b-5e42-ac14-fc38cc610a11
@@ -83,20 +86,44 @@ dynamic.elk.end=6303dcc8-4ec9-5dd8-91f9-e829e86fa330
dynamic.elk.start=cf939c33-1ebe-5dbb-95d2-fbe2d5cc6a4e
dynamic.end=40a6f402-31ee-536a-a006-729105f55003
dynamic.start=07bf10d5-da4e-5113-b1c2-b8d802bda651
+dynamic.ssl.end=6714568e-9598-5cc9-8b42-3e4516c90e29
+dynamic.ssl.start=82efa164-f4e6-5dc1-93a4-51543e810225
+dynamic.pswless.database.end=b8114488-9109-5f46-a877-e86ef18d16e4
+dynamic.pswless.database.start=25b7acd8-5b7d-57c9-9c34-ad14837c39a6
# Pids to indicate which base image was chosen. No difference in these
# between Oracle and Microsoft
-from.owls-122130-8u131-ol74=caa3ea2b-cdec-55ee-8510-854ed10d7ebe
-from.owls-122130-8u131-ol73=bf1d0f1a-cb9a-5453-bf70-42b4efe8c15e
+#from.owls-122130-8u131-ol74=caa3ea2b-cdec-55ee-8510-854ed10d7ebe
+#from.owls-122130-8u131-ol73=bf1d0f1a-cb9a-5453-bf70-42b4efe8c15e
from.owls-122140-8u251-ol76=bde756bb-ce96-54d5-a478-04d9bd87e9db
from.owls-141100-8u251-ol76=b6f00a34-1478-5a10-9a84-49c4051b57b8
from.owls-141100-11_07-ol76=afc8f9c5-8c5d-5d1b-ab4d-3116ca908bfd
# Pids to indicate which latest base image was chosen. No difference in these
# between Oracle and Microsoft
-from.owls-122130-jdk8-ol74=caa3ea2b-cdec-55ee-8510-854ed10d7ebe
-from.owls-122130-jdk8-ol73=bf1d0f1a-cb9a-5453-bf70-42b4efe8c15e
+#from.owls-122130-jdk8-ol74=caa3ea2b-cdec-55ee-8510-854ed10d7ebe
+#from.owls-122130-jdk8-ol73=bf1d0f1a-cb9a-5453-bf70-42b4efe8c15e
from.owls-122140-jdk8-ol76=bde756bb-ce96-54d5-a478-04d9bd87e9db
from.owls-141100-jdk8-ol76=b6f00a34-1478-5a10-9a84-49c4051b57b8
from.owls-141100-jdk11-ol76=afc8f9c5-8c5d-5d1b-ab4d-3116ca908bfd
+from.owls-122140-jdk8-ol87=cc7ee628-3750-489c-97f5-1e484d710e69
+from.owls-122140-jdk8-ol91=92f40d92-0786-4812-8918-a6f9dcc1b4ec
+from.owls-141100-jdk8-ol87=5d011fae-34c1-4004-bd19-c2d2bccd30a4
+from.owls-141100-jdk8-ol91=9d76fbe1-3bbc-4a2b-a209-7427ad1db4ab
+from.owls-141100-jdk11-ol87=459c178d-aec6-4bb5-8e80-66291f0ee6a8
+from.owls-141100-jdk11-ol91=5ce9afe5-c458-41b6-9b46-4899cde11806
+from.owls-141200-jdk21-ol94=ce0ed319-7470-4b49-ba87-a3ebcf49b303
+from.owls-141200-jdk21-ol810=2d0a20a6-3022-4142-8f46-6f84af1adc66
+from.owls-141200-jdk17-ol94=d3acf02b-e8f3-41ca-9e62-1318ab0c023b
+from.owls-141200-jdk17-ol810=00d54634-1327-46bc-8a05-865aa648733d
+
+
+# Pids to indicate which latest base image was chosen. No difference in these
+# between Oracle and Microsoft
+from.owls-122140-jdk8-rhel76=0a52f317-8b40-4a77-9f3c-7607fc3ebfb7wls
+from.owls-141100-jdk8-rhel76=26ec5cf5-dd84-4764-97cf-4f830facbf66wls
+from.owls-141100-jdk11-rhel76=ada2e3e6-faef-4339-aaac-40bcdc4484ecwls
+from.owls-122140-jdk8-rhel87=6250babf-50f9-4462-a0c5-ebb8dc1ce5a7
+from.owls-141100-jdk8-rhel87=544e3e2c-d0a5-4615-87ff-2eadd5fefe5d
+from.owls-141100-jdk11-rhel87=7c4a9f1d-ecdf-4fd3-bb68-0a28d8bea751
\ No newline at end of file
diff --git a/weblogic-azure-vm/arm-oraclelinux-wls/src/main/resources/pid.properties b/resources/pid.properties
similarity index 57%
rename from weblogic-azure-vm/arm-oraclelinux-wls/src/main/resources/pid.properties
rename to resources/pid.properties
index 637d18d2a..183acf541 100644
--- a/weblogic-azure-vm/arm-oraclelinux-wls/src/main/resources/pid.properties
+++ b/resources/pid.properties
@@ -1,22 +1,14 @@
# Copyright (c) 2021, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
-azure.apiVersion=2020-06-01
-azure.apiVersion2=2019-06-01
-azure.apiVersionForDeploymentScript=2020-10-01
-azure.apiVersionForDNSZone=2018-05-01
-azure.apiVersionForIndentity=2018-11-30
-azure.apiVersionForKeyVault=2019-09-01
-
-
# Values in this file are read at build time for the other Azure Marketplace offer repositories
-# Pids used in https://github.com/wls-eng/arm-oraclelinux-wls
+# Pids used in https://github.com/oracle/weblogic-azure/tree/main/weblogic-azure-vm/arm-oraclelinux-wls
-end=8f75aefd-02b2-50a8-9a6e-feb41176559f
+end=pid-a63dea86-f8db-4e75-a231-1145d4f3ab6e-partnercenter
start=b446fe15-5d43-5549-858d-4775741cd0ba
-# Pids used in https://github.com/wls-eng/arm-oraclelinux-wls-admin
+# Pids used in https://github.com/oracle/weblogic-azure/tree/main/weblogic-azure-vm/arm-oraclelinux-wls-admin
admin.aad.end=d273da6e-9128-5163-a8e7-9f76505ae5cc
admin.aad.start=0a77c1d0-cf1e-5185-89ad-230ffe24d0f8
@@ -27,9 +19,13 @@ admin.database.start=6b253477-1753-5eb1-9448-506ab47494d7
admin.elk.end=78d94c2d-7401-52a9-be03-4cf1cc4e8f00
admin.elk.start=f3c3b847-89ea-54c8-904e-10ff8eba14fa
admin.end=057f09cd-6f90-5c1c-b655-3aba168aef35
-admin.start=18f4b80a-6ca2-5840-8f88-4e7156ed6db0
+admin.start=pid-7e52c2b6-1acb-416f-af55-5837ff783eb7-partnercenter
+admin.ssl.end=7185f788-ab74-5158-8f83-4146d01cec9d
+admin.ssl.start=2730ced3-52c2-501c-94b1-bfcffa5b5bc0
+admin.pswless.database.end=89d5d2a9-16b8-5b3d-b5f7-1fb8a6cfdfc5
+admin.pswless.database.start=53e099bf-8148-5463-a51d-0272520bac60
-# Pids used in https://github.com/wls-eng/arm-oraclelinux-wls-cluster
+# Pids used in https://github.com/oracle/weblogic-azure/tree/main/weblogic-azure-vm/arm-oraclelinux-wls-cluster
cluster.aad.end=3031eeaf-f67e-5f61-8bf1-aad6006eaf6d
cluster.aad.start=cae84e84-407c-5cef-b2a0-cd95d4ff8824
@@ -37,6 +33,7 @@ cluster.addnode.end=2452bb0e-13d9-5ad3-816b-d645ba5198c4
cluster.addnode.start=ffa02caf-ecb9-59df-82ce-74b9b0247c50
cluster.addnode.coherence.end=157ea8ac-12ae-11eb-adc1-0242ac120002
cluster.addnode.coherence.start=157ea992-12ae-11eb-adc1-0242ac120002
+cluster.appgateway.custom.certificate=d7c730b9-494c-5cc2-9695-cc1057da3e0b
cluster.appgateway.end=b626bee4-b322-5418-990f-6463a799be3c
cluster.appgateway.start=4f85ce49-6de4-5925-b77c-3eb15ba97921
cluster.appgateway.keyvault.start=ed922d8d-975f-55d4-b33a-28fbace13f76
@@ -53,8 +50,12 @@ cluster.dns.end=022d99e3-8ba3-5822-b6d5-bcec64518286
cluster.dns.start=c40d7aa8-9fe7-51c6-86de-27e1b1678735
cluster.elk.end=cb84f8ed-d270-5036-8240-08b6bb4f2bd6
cluster.elk.start=c9bac63c-67eb-52e1-a8c1-2ba57f65cfbc
+cluster.ssl.end=4f29ea52-dfd6-5537-8a38-4ad9fec1b2eb
+cluster.ssl.start=e09585b6-b7cb-5437-a600-c95c88ac37b3
cluster.end=0fa14086-2d46-54a4-8aba-470addc3dce6
-cluster.start=2488df5d-5e73-5593-9d92-37b40999a9e0
+cluster.pswless.database.end=b4d91140-fb26-50de-9358-147b9dd25f7a
+cluster.pswless.database.start=340f5265-6de7-5b6f-aad3-9f179736cde0
+cluster.start=pid-7363cd91-937d-4469-a7a8-ecbeddfb7a0f-partnercenter
# Pids to indicate which database was chosen. No difference in these
# between Oracle and Microsoft
@@ -62,8 +63,9 @@ cluster.start=2488df5d-5e73-5593-9d92-37b40999a9e0
database.oracle=692b2d84-72f5-5992-a15d-0d5bcfef040d
database.postgresql=935df06e-a5d5-5bf1-af66-4c1eb71dac7a
database.sqlserver=3569588c-b89d-5567-84ee-a2c633c7204c
+database.mysql=41c353ae-6f7b-442f-b903-996cb42c1bbe
-# Pids used in https://github.com/wls-eng/arm-oraclelinux-wls-dynamic-cluster
+# Pids used in https://github.com/oracle/weblogic-azure/tree/main/weblogic-azure-vm/arm-oraclelinux-wls-dynamic-cluster
dynamic.aad.end=f5a60b13-efd6-551a-a40f-3923804e475d
dynamic.aad.start=7dcc8904-9a8a-5b71-a73d-6caa9edb78ef
@@ -84,21 +86,44 @@ dynamic.deletenode.end=3eb27f94-d1c3-572c-a7da-0d7f26f6a20e
dynamic.elk.end=d154e480-15e2-5cf7-bdd5-6219c1793967
dynamic.elk.start=bc636673-2dca-5e40-a2aa-6891c344aa17
dynamic.end=93da13bf-11f6-5bfb-9b51-7deb152a21c3
-dynamic.start=2551958c-2465-5e2e-8e28-0b3a4babf3f0
+dynamic.start=pid-89d7f595-f164-4cbd-abbb-e46cd0b342e8-partnercenter
+dynamic.ssl.end=325d251a-fcba-5bd8-ad50-5682b3565ef5
+dynamic.ssl.start=f6e45a7a-d491-58a1-a951-fad252e1a396
+dynamic.pswless.database.end=fd30aea8-ec77-557b-ba12-4de15018a64a
+dynamic.pswless.database.start=2d02881c-c7d2-5e16-b9b8-c48ebf70d2ab
# Pids to indicate which base image was chosen. No difference in these
# between Oracle and Microsoft
-from.owls-122130-8u131-ol74=ac3571f9-c12d-5caa-b886-85734693ab63
-from.owls-122130-8u131-ol73=2bd71be8-b31c-5fbf-96ba-61fde622586d
+#from.owls-122130-8u131-ol74=ac3571f9-c12d-5caa-b886-85734693ab63
+#from.owls-122130-8u131-ol73=2bd71be8-b31c-5fbf-96ba-61fde622586d
from.owls-122140-8u251-ol76=dd07bd44-828b-566a-8dc6-b84bf301bf1d
from.owls-141100-8u251-ol76=cb2af004-23c3-5c85-87b9-9de767c7a61e
from.owls-141100-11_07-ol76=632e8fde-e61f-57bf-af9d-5804bf00ecb3
# Pids to indicate which latest base image was chosen.
-from.owls-122130-jdk8-ol74=40fe0044-5739-466c-96aa-0c82ab465d0b
-from.owls-122130-jdk8-ol73=799fc764-af80-45c3-aea1-599f55901e73
+#from.owls-122130-jdk8-ol74=40fe0044-5739-466c-96aa-0c82ab465d0b
+#from.owls-122130-jdk8-ol73=799fc764-af80-45c3-aea1-599f55901e73
from.owls-122140-jdk8-ol76=6637154a-06d2-4ac0-82ab-2a1d7e391eab
from.owls-141100-jdk8-ol76=060d9c3f-cc20-4380-a383-fd20594e5b2a
from.owls-141100-jdk11-ol76=3220431f-33d4-416a-8df7-a0fcc23a25e4
-
+from.owls-122140-jdk8-ol87=f2ae4133-abd1-4711-ae74-aeb6e498f2c0
+from.owls-122140-jdk8-ol91=2b7d87a9-981a-44af-bf71-b2b479841ed9
+from.owls-141100-jdk8-ol87=d58854b4-a612-4fbf-b095-f2d2178a88df
+from.owls-141100-jdk8-ol91=76d4dbe8-0679-4772-ad2e-461fac83dfd7
+from.owls-141100-jdk11-ol87=c8f1b07d-1660-4f6a-be97-3925645e8817
+from.owls-141100-jdk11-ol91=cd48f178-52a3-415e-88bb-caa45f615b94
+from.owls-141200-jdk21-ol94=6acfc3a0-6225-465f-9fa9-4d63ea81405b
+from.owls-141200-jdk21-ol810=69146c30-9c7e-4ce2-8642-eb8eb1b23cc5
+from.owls-141200-jdk17-ol94=a9a1c9b7-6c27-4086-b271-1ffd3c215390
+from.owls-141200-jdk17-ol810=69b6b433-1679-4d37-af25-3d56987c217a
+
+
+# Pids to indicate which latest base image was chosen. No difference in these
+# between Oracle and Microsoft
+from.owls-122140-jdk8-rhel76=0a52f317-8b40-4a77-9f3c-7607fc3ebfb7wls
+from.owls-141100-jdk8-rhel76=26ec5cf5-dd84-4764-97cf-4f830facbf66wls
+from.owls-141100-jdk11-rhel76=ada2e3e6-faef-4339-aaac-40bcdc4484ecwls
+from.owls-122140-jdk8-rhel87=0799976a-84a5-4a59-b74b-bd67c4d37aa5
+from.owls-141100-jdk8-rhel87=2e471204-8cbe-4aec-9c64-4d0f5f8d590d
+from.owls-141100-jdk11-rhel87=b1f76ba8-078d-43bc-b35c-9a4952eb00c9
\ No newline at end of file
diff --git a/sbom_generation.yaml b/sbom_generation.yaml
new file mode 100644
index 000000000..24f757d1d
--- /dev/null
+++ b/sbom_generation.yaml
@@ -0,0 +1,22 @@
+# Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+
+# This OCI DevOps build specification file [1] generates a Software Bill of Materials (SBOM) of the repository.
+# The file is needed to run checks for third-party vulnerabilities and business approval according to Oracle’s GitHub policies.
+# [1] https://docs.oracle.com/en-us/iaas/Content/devops/using/build_specs.htm
+
+version: 0.1
+component: build
+timeoutInSeconds: 1000
+shell: bash
+
+steps:
+ - type: Command
+ name: "Run Maven cycloneDX plugin command"
+ command: |
+ # For more details, visit https://github.com/CycloneDX/cyclonedx-maven-plugin/blob/master/README.md
+ mvn org.cyclonedx:cyclonedx-maven-plugin:2.7.9:makeAggregateBom -DincludeRuntimeScope=true -DincludeCompileScope=true -DincludeProvidedScope=false -DincludeSystemScope=false -DincludeTestScope=false -DoutputFormat=json -DoutputName=artifactSBOM -DschemaVersion=1.4
+ mv target/artifactSBOM.json ${OCI_PRIMARY_SOURCE_DIR}/artifactSBOM.json
+outputArtifacts:
+ - name: artifactSBOM
+ type: BINARY
+ location: ${OCI_PRIMARY_SOURCE_DIR}/artifactSBOM.json
diff --git a/weblogic-azure-aks/README.md b/weblogic-azure-aks/README.md
index b8e7b61ea..8725b2442 100644
--- a/weblogic-azure-aks/README.md
+++ b/weblogic-azure-aks/README.md
@@ -10,7 +10,7 @@ The [Azure Marketplace WebLogic Server Offering](https://azuremarketplace.micros
## Documentation
-Please refer to the documentation sample [Oracle WebLogic Server Azure Applications](https://oracle.github.io/weblogic-kubernetes-operator/samples/simple/azure-kubernetes-service/)
+Please refer to the documentation sample [Oracle WebLogic Server Azure Applications](https://aka.ms/wls-aks-docs-sample)
## Examples
diff --git a/weblogic-azure-aks/pom.xml b/weblogic-azure-aks/pom.xml
index fa63fa571..32522acc5 100644
--- a/weblogic-azure-aks/pom.xml
+++ b/weblogic-azure-aks/pom.xml
@@ -1,28 +1,39 @@
-
- 4.0.0
+
+ 4.0.0
-
+
+
- com.oracle.weblogic.azure
- wls-on-aks-azure-marketplace
- 1.0.18
+
+ com.oracle.weblogic.azure
+ weblogic-azure
+ 1.0.0
+
-
- com.microsoft.azure.iaas
- azure-javaee-iaas-parent
- 1.0.10
-
-
+ com.oracle.weblogic.azure
+ wls-on-aks-azure-marketplace
+ ${version.wls-on-aks-azure-marketplace}
+
+ jar
+ wls-on-aks-azure-marketplace
+
+
+ ${project.basedir}/..
+
+ false
+ false
+
- jar
- wls-on-aks-azure-marketplace
-
- -TestParameter '@{"PasswordMinLength"=6}'
-
diff --git a/weblogic-azure-aks/src/main/arm/createUiDefinition.json b/weblogic-azure-aks/src/main/arm/createUiDefinition.json
index 812fac057..7600b9c04 100644
--- a/weblogic-azure-aks/src/main/arm/createUiDefinition.json
+++ b/weblogic-azure-aks/src/main/arm/createUiDefinition.json
@@ -3,12 +3,36 @@
"handler": "Microsoft.Azure.CreateUIDef",
"version": "0.1.2-preview",
"parameters": {
+ "config": {
+ "basics": {
+ "resourceGroup": {
+ "allowExisting": true
+ }
+ }
+ },
"basics": [
+ {
+ "name": "infoForBeforeDeployment",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": true,
+ "options": {
+ "icon": "Info",
+ "text": "The Azure identity deploying this offer must have one of the following two sets of Azure role-based access control roles:
Contributor and User Access Administrator of the current subscription.Owner of the current subscription."
+ }
+ },
{
"name": "basicsRequired",
"type": "Microsoft.Common.Section",
"label": "Credentials for WebLogic",
"elements": [
+ {
+ "name": "listVMSizes",
+ "type": "Microsoft.Solutions.ArmApiControl",
+ "request": {
+ "method": "GET",
+ "path": "[concat(subscription().id, '/providers/Microsoft.Compute/locations/',location(),'/vmSizes?api-version=2024-03-01')]"
+ }
+ },
{
"name": "wlsUserName",
"type": "Microsoft.Common.TextBox",
@@ -21,10 +45,6 @@
{
"regex": "^[a-z0-9A-Z]{1,30}$",
"message": "The value must be 1-30 characters long and must only contain letters and numbers."
- },
- {
- "isValid": "[greaterOrEquals(length(basics('basicsRequired').identity.userAssignedIdentities),1)]",
- "message": "Please select at least one user assigned managed identity from User assigned managed identity control below."
}
]
},
@@ -52,10 +72,10 @@
"name": "wdtRuntimePassword",
"type": "Microsoft.Common.PasswordBox",
"label": {
- "password": "Password for WebLogic Deploy Tooling runtime encrytion",
+ "password": "Password for WebLogic Model encryption",
"confirmPassword": "Confirm password"
},
- "toolTip": "The model WebLogic Deploy Tooling runtime encrytion secret.",
+ "toolTip": "Model in Image requires a runtime encryption secret with a secure `password` key. This secret is used by the operator to encrypt model and domain home artifacts before it adds them to a runtime ConfigMap or log. See https://aka.ms/wls-aks-model-runtime-encryption-secret.",
"constraints": {
"required": true,
"regex": "^(?=.*[A-Z])(?=.*[a-z])(?=.*\\d)[A-Za-z\\d]{12,}$",
@@ -65,72 +85,6 @@
"hideConfirmation": false
},
"visible": true
- },
- {
- "name": "ocrSSOInfo",
- "type": "Microsoft.Common.InfoBox",
- "visible": true,
- "options": {
- "icon": "Info",
- "text": "Provide an Oracle Single Sign-On (SSO) account to access the Oracle Registry Server. Click the link to create Oracle SSO account.",
- "uri": "https://profile.oracle.com/myprofile/account/create-account.jspx"
- }
- },
- {
- "name": "ocrSSOUserName",
- "type": "Microsoft.Common.TextBox",
- "label": "Username for Oracle Single Sign-On authentication",
- "defaultValue": "example@contoso.com",
- "toolTip": "Username for Oracle Single Sign-On authentication to login the Oracle Container Registry.",
- "constraints": {
- "required": true,
- "regex": "^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$",
- "validationMessage": "The value must be an email address."
- },
- "visible": true
- },
- {
- "name": "ocrSSOPassword",
- "type": "Microsoft.Common.PasswordBox",
- "label": {
- "password": "Password for Oracle Single Sign-On authentication",
- "confirmPassword": "Confirm password"
- },
- "toolTip": "Password for Oracle Single Sign-On authentication to login the Oracle Container Registry.",
- "constraints": {
- "required": true,
- "regex": "^(?=.*[A-Z])(?=.*[a-z])(?=.*\\d)[A-Za-z\\d\\$\\&\\+\\,:\\=\\?@#|'.\\^\\*!\\-_~/'\\[\\]\\{\\}\"]{8,}$",
- "validationMessage": "The password must contain at least 8 characters, with at least 1 uppercase letter, 1 lowercase letter and 1 number, and special characters, but should not contain > < ( ) % ; \\."
- },
- "options": {
- "hideConfirmation": false
- },
- "visible": true
- },
- {
- "name": "errInfo",
- "type": "Microsoft.Common.InfoBox",
- "visible": "[less(length(basics('basicsRequired').identity.userAssignedIdentities),1)]",
- "options": {
- "icon": "Error",
- "text": "Please select at least one user assigned managed identity.",
- "uri": "https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-manage-ua-identity-portal"
- }
- },
- {
- "name": "identity",
- "type": "Microsoft.ManagedIdentity.IdentitySelector",
- "label": "Managed Identity Configuration",
- "toolTip": {
- "userAssignedIdentity": "Add user assigned identities to enable the WebLogic domain deployment."
- },
- "defaultValue": {
- "systemAssignedIdentity": "Off"
- },
- "options": {
- "hideSystemAssignedIdentity": true,
- "hideUserAssignedIdentity": false
- }
}
],
"visible": true
@@ -204,10 +158,10 @@
"type": "Microsoft.Common.Slider",
"min": 5,
"max": 1000,
- "label": "Cluster size",
+ "label": "Maximum dynamic cluster size",
"defaultValue": 5,
"showStepMarkers": false,
- "toolTip": "The maximum size of the WebLogic cluster.",
+ "toolTip": "The maximum size of the dynamic WebLogic cluster.",
"constraints": {
"required": true
},
@@ -225,17 +179,31 @@
},
"visible": "[bool(basics('basicsOptional').basicsOptionalAcceptDefaults)]"
},
+ {
+ "name": "enableT3TunnelingTextBlock",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": false,
+ "options": {
+ "text": "If checked, configure the necessary settings to enable T3 tunneling. You must take additional action on the Networking tab if either of these are checked.",
+ "link": {
+ "label": "Learn more",
+ "uri": "https://aka.ms/wls-aks-t3-tunneling"
+ }
+ }
+ },
{
"name": "enableAdminT3Tunneling",
"type": "Microsoft.Common.CheckBox",
- "label": "Enable T3 tunneling for Admin Server",
- "visible": "[bool(basics('basicsOptional').basicsOptionalAcceptDefaults)]"
+ "label": "Enable T3 tunneling for Administration Server",
+ "toolTip": "If checked, enable T3 tunneling for Administration Server",
+ "visible": false
},
{
"name": "enableClusterT3Tunneling",
"type": "Microsoft.Common.CheckBox",
"label": "Enable T3 tunneling for WebLogic cluster",
- "visible": "[bool(basics('basicsOptional').basicsOptionalAcceptDefaults)]"
+ "toolTip": "If checked, enable T3 tunneling for WebLogic cluster.",
+ "visible": false
}
],
"visible": true
@@ -245,6 +213,18 @@
"type": "Microsoft.Common.Section",
"label": "Report issues, get help, and share feedback",
"elements": [
+ {
+ "name": "help",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": true,
+ "options": {
+ "text": "See the documentation for this offer in the Oracle WebLogic Kubernetes Operator.",
+ "link": {
+ "label": "Offer documentation",
+ "uri": "https://aka.ms/wls-aks-docs"
+ }
+ }
+ },
{
"name": "howToReportIssueText",
"type": "Microsoft.Common.TextBlock",
@@ -253,7 +233,7 @@
"text": "If you encounter problems during the deployment of Oracle WebLogic Server, report them here.",
"link": {
"label": "Issue tracker",
- "uri": "https://aka.ms/arm-oraclelinux-wls-issues"
+ "uri": "https://aka.ms/arm-oraclelinux-wls-issues?version=${project.version}"
}
}
},
@@ -283,26 +263,17 @@
}
],
"visible": true
- },
- {
- "name": "About",
- "type": "Microsoft.Common.InfoBox",
- "options": {
- "icon": "None",
- "text": "Template version ${project.version}"
- },
- "visible": "[bool('${template.version.visible}')]"
}
],
"steps": [
{
"name": "section_aks",
- "label": "Configure AKS cluster",
+ "label": "AKS",
"subLabel": {
"preValidation": "Provide required info for AKS cluster configuration",
"postValidation": "Done"
},
- "bladeTitle": "Configure AKS cluster",
+ "bladeTitle": "AKS",
"elements": [
{
"name": "clusterInfo",
@@ -329,6 +300,14 @@
"required": true
}
},
+ {
+ "name": "infoBoxAks",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": "[not(bool(steps('section_aks').clusterInfo.createAKSCluster))]",
+ "options": {
+ "text": "Ensure that the AKS cluster is configured with the following networking settings:
Container networking: Azure CNI Node SubnetNetwork policy: Azure"
+ }
+ },
{
"name": "aksClusterSelector",
"type": "Microsoft.Solutions.ResourceSelector",
@@ -344,32 +323,76 @@
"visible": "[not(bool(steps('section_aks').clusterInfo.createAKSCluster))]"
},
{
- "name": "aksNodeCount",
- "type": "Microsoft.Common.Slider",
- "min": 1,
- "max": 1000,
- "label": "Node count",
- "defaultValue": 2,
- "showStepMarkers": false,
- "toolTip": "The number of nodes that should be created along with the cluster. You will be able to resize the cluster later.",
- "constraints": {
- "required": true
- },
- "visible": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]"
+ "name": "aksSupportedVersionTextBlock",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]",
+ "options": {
+ "icon": "Info",
+ "text": "AKS supports a range of Kubernetes versions. This offer is tested with a specific Kubernetes version known to work with WebLogic Server on AKS; click 'Learn more' to find the version information.",
+ "link": {
+ "label": "Learn more",
+ "uri": "https://aka.ms/wls-aks-well-tested-version"
+ }
+ }
},
{
"name": "nodeVMSizeSelector",
"type": "Microsoft.Compute.SizeSelector",
- "label": "Size",
- "toolTip": "",
+ "label": "Node size",
+ "toolTip": "The size of virtual machine to provision.",
"recommendedSizes": [
"Standard_DS2_v2"
],
"constraints": {
- "allowedSizes": [],
- "excludedSizes": [],
- "numAvailabilityZonesRequired": 3,
- "zone": "3"
+ "excludedSizes": [
+ "Standard_A0",
+ "Standard_A1",
+ "Standard_A1_v2",
+ "Standard_F1",
+ "Standard_F1s",
+ "Standard_DS1_v2",
+ "Standard_B12ms",
+ "Standard_B16als_v2",
+ "Standard_B16as_v2",
+ "Standard_B16ls_v2",
+ "Standard_B16ms",
+ "Standard_B16pls_v2",
+ "Standard_B16ps_v2",
+ "Standard_B16s_v2",
+ "Standard_B1ls",
+ "Standard_B1ms",
+ "Standard_B1s",
+ "Standard_B20ms",
+ "Standard_B2als_v2",
+ "Standard_B2as_v2",
+ "Standard_B2ats_v2",
+ "Standard_B2ls_v2",
+ "Standard_B2ms",
+ "Standard_B2pls_v2",
+ "Standard_B2ps_v2",
+ "Standard_B2pts_v2",
+ "Standard_B2s",
+ "Standard_B2s_v2",
+ "Standard_B2ts_v2",
+ "Standard_B32als_v2",
+ "Standard_B32as_v2",
+ "Standard_B32ls_v2",
+ "Standard_B32s_v2",
+ "Standard_B4als_v2",
+ "Standard_B4as_v2",
+ "Standard_B4ls_v2",
+ "Standard_B4ms",
+ "Standard_B4pls_v2",
+ "Standard_B4ps_v2",
+ "Standard_B4s_v2",
+ "Standard_B8als_v2",
+ "Standard_B8as_v2",
+ "Standard_B8ls_v2",
+ "Standard_B8ms",
+ "Standard_B8pls_v2",
+ "Standard_B8ps_v2",
+ "Standard_B8s_v2"
+ ]
},
"options": {
"hideDiskTypeFilter": false
@@ -379,28 +402,45 @@
"visible": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]"
},
{
- "name": "enableAzureMonitoring",
- "type": "Microsoft.Common.CheckBox",
- "label": "Enable Azure Monitoring",
+ "name": "aksNodeCount",
+ "type": "Microsoft.Common.Slider",
+ "min": "[add(1, div(add(12288, mul(if(bool(basics('basicsOptional').basicsOptionalAcceptDefaults), basics('basicsOptional').wlsClusterSize, 5), 1536)), first(filter(basics('basicsRequired').listVMSizes.value, (item) => equals(item.name, steps('section_aks').clusterInfo.nodeVMSizeSelector))).memoryInMB))]",
+ "defaultValue": 3,
+ "max": 998,
+ "label": "Minimum node count",
+ "showStepMarkers": false,
+ "toolTip": "Set the minimum node count for the cluster.",
+ "constraints": {
+ "required": true
+ },
"visible": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]"
},
{
- "name": "enableAzureFileShare",
- "type": "Microsoft.Common.CheckBox",
- "label": "Create Persistent Volume using Azure File share service",
+ "name": "aksNodeMaxCount",
+ "type": "Microsoft.Common.Slider",
+ "min": "[add(steps('section_aks').clusterInfo.aksNodeCount,2)]",
+ "defaultValue": 3,
+ "max": 1000,
+ "label": "Maximum node count",
+ "showStepMarkers": false,
+ "toolTip": "Set the maximum node count for the cluster.",
+ "constraints": {
+ "required": true
+ },
"visible": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]"
}
]
},
{
- "name": "acrInfo",
+ "name": "imageInfo",
"type": "Microsoft.Common.Section",
- "label": "Azure Container Registry",
+ "label": "Image selection",
"elements": [
{
- "name": "createACR",
+ "name": "oracleCreateACR",
"type": "Microsoft.Common.OptionsGroup",
- "label": "Create a new ACR instance?",
+ "visible": true,
+ "label": "Create a new Azure Container Registry to store application images?",
"defaultValue": "Yes",
"toolTip": "Select 'Yes' to create a new ACR instance, or select 'No' to provide an existing ACR instance.",
"constraints": {
@@ -418,17 +458,17 @@
}
},
{
- "name": "acrInfo",
+ "name": "oracleAcrInfo",
"type": "Microsoft.Common.InfoBox",
- "visible": "[not(bool(steps('section_aks').acrInfo.createACR))]",
+ "visible": "[not(bool(steps('section_aks').imageInfo.oracleCreateACR))]",
"options": {
"icon": "Info",
- "text": "Make sure the Azure Container Registry has enabled admin user.",
+ "text": "Make sure the Azure Container Registry has enabled the admin user.",
"uri": "https://docs.microsoft.com/en-us/azure/container-registry/container-registry-authentication#admin-account"
}
},
{
- "name": "acrSelector",
+ "name": "oracleAcrSelector",
"type": "Microsoft.Solutions.ResourceSelector",
"label": "Select ACR instance",
"toolTip": "Select the existing ACR instance.",
@@ -439,34 +479,165 @@
"location": "onBasics"
}
},
- "visible": "[not(bool(steps('section_aks').acrInfo.createACR))]"
- }
- ]
- },
- {
- "name": "imageInfo",
- "type": "Microsoft.Common.Section",
- "label": "Oracle WebLogic Image",
- "elements": [
+ "visible": "[not(bool(steps('section_aks').imageInfo.oracleCreateACR))]"
+ },
{
- "name": "fromImageText",
- "type": "Microsoft.Common.TextBlock",
+ "name": "ocrSSOInfo",
+ "type": "Microsoft.Common.InfoBox",
"visible": true,
"options": {
- "text": "This value is appended to 'container-registry.oracle.com/middleware/weblogic:' and used in the Dockerfile from statement. \nOracle Standard Terms and Restrictions terms must be agreed. \nClick the following link to make sure you have agree the terms and check the valid tags.",
- "link": {
- "label": "Must be a valid tag value from Oracle Container Registry",
- "uri": "https://aka.ms/wls-aks-fromImage-tag?${project.version}-${maven.build.timestamp}"
- }
+ "icon": "Info",
+ "text": "Provide an Oracle Single Sign-On (SSO) account to access the Oracle Registry Server. Select the link to create an Oracle SSO account.",
+ "uri": "https://aka.ms/wls-aks-create-sso-account"
+ }
+ },
+ {
+ "name": "ocrSSOUserName",
+ "type": "Microsoft.Common.TextBox",
+ "label": "Username for Oracle Single Sign-On authentication",
+ "defaultValue": "example@contoso.com",
+ "toolTip": "Username for Oracle Single Sign-On authentication to login the Oracle Container Registry.",
+ "constraints": {
+ "required": true,
+ "regex": "^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$",
+ "validationMessage": "The value must be an email address."
+ },
+ "visible": true
+ },
+ {
+ "name": "ocrSSOTokenInfo",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": true,
+ "options": {
+ "icon": "Info",
+ "text": "Please use an Auth Token associated with an SSO user. Select the link to see more details.",
+ "uri": "https://aka.ms/wls-auth-token"
+ }
+ },
+ {
+ "name": "ocrSSOPassword",
+ "type": "Microsoft.Common.PasswordBox",
+ "label": {
+ "password": "Auth token for Oracle Single Sign-On authentication",
+ "confirmPassword": "Confirm token"
+ },
+ "toolTip": "Auth token for Oracle Single Sign-On authentication to login the Oracle Container Registry.",
+ "constraints": {
+ "required": true,
+ "regex": "^(?=.*[A-Z])(?=.*[a-z])(?=.*\\d)[A-Za-z\\d\\$\\&\\+\\,:\\=\\?@#|'.\\^\\*!\\-_~/'\\[\\]\\{\\}\"]{8,}$",
+ "validationMessage": "The token must contain at least 8 characters, with at least 1 uppercase letter, 1 lowercase letter and 1 number, and special characters, but should not contain > < ( ) % ; \\."
+ },
+ "options": {
+ "hideConfirmation": false
+ },
+ "visible": true
+ },
+ {
+ "name": "fromImageInfo",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": true,
+ "options": {
+ "icon": "Warning",
+ "text": "Before moving forward, you must accept the Oracle Standard Terms and Restrictions for the WebLogic Server
image in the Oracle Container Registry (OCR). The deployment will fail if you have not accepted the terms.
If you have an Oracle support contract, we recommend using the Patched WebLogic Server Images.
Otherwise, you can use the General WebLogic Server Images.
To use the General WebLogic Server Images:To use the Patched WebLogic Server Images:Once you have accepted the terms in OCR, make sure to select the right type of image below."
}
},
{
- "name": "fromImage",
+ "name": "isSSOSupportEntitled",
+ "type": "Microsoft.Common.OptionsGroup",
+ "label": "Select the type of WebLogic Server Images.",
+ "defaultValue": "General WebLogic Server Images",
+ "toolTip": "If 'Patched WebLogic Server Images' is selected, the deployment process will pull from the Patched WebLogic Server Images repository. If 'General WebLogic Server Images' is selected the deployment process will pull from the General WebLogic Server Images repository.",
+ "constraints": {
+ "allowedValues": [
+ {
+ "label": "General WebLogic Server Images",
+ "value": "false"
+ },
+ {
+ "label": "Patched WebLogic Server Images",
+ "value": "true"
+ }
+ ],
+ "required": true
+ },
+ "visible": true
+ },
+ {
+ "name": "oracleImageSelector",
+ "type": "Microsoft.Common.DropDown",
+ "label": "Select desired combination of WebLogic Server, JDK and Operating System or fully qualified Docker tag",
+ "defaultValue": "14.1.2.0-17-ol9",
+ "multiLine": true,
+ "toolTip": "Select image",
+ "constraints": {
+ "allowedValues": [
+ {
+ "label": "14.1.2.0-17-ol9",
+ "description": "14c (14.1.2.0) on JDK 17 on Oracle Linux 9",
+ "value": "14.1.2.0-generic-jdk17-ol9"
+ },
+ {
+ "label": "14.1.2.0-21-ol9",
+ "description": "14c (14.1.2.0) on JDK 21 on Oracle Linux 9",
+ "value": "14.1.2.0-generic-jdk21-ol9"
+ },
+ {
+ "label": "14.1.2.0-17-ol8",
+ "description": "14c (14.1.2.0) on JDK 17 on Oracle Linux 8",
+ "value": "14.1.2.0-generic-jdk17-ol8"
+ },
+ {
+ "label": "14.1.2.0-21-ol8",
+ "description": "14c (14.1.2.0) on JDK 21 on Oracle Linux 8",
+ "value": "14.1.2.0-generic-jdk21-ol8"
+ },
+ {
+ "label": "14.1.1.0-8",
+ "description": "14c (14.1.1.0) on JDK 8 on Oracle Linux 7",
+ "value": "14.1.1.0-8"
+ },
+ {
+ "label": "14.1.1.0-11",
+ "description": "14c (14.1.1.0) on JDK 11 on Oracle Linux 7",
+ "value": "14.1.1.0-11"
+ },
+ {
+ "label": "12.2.1.4",
+ "description": "12cR2 (12.2.1.4) on JDK 8 on Oracle Linux 7",
+ "value": "12.2.1.4"
+ },
+ {
+ "label": "14.1.1.0-8-ol8",
+ "description": "14c (14.1.1.0.0) on JDK 8 on Oracle Linux 8",
+ "value": "14.1.1.0-8-ol8"
+ },
+ {
+ "label": "14.1.1.0-11-ol8",
+ "description": "14c (14.1.1.0.0) on JDK 11 on Oracle Linux 8",
+ "value": "14.1.1.0-11-ol8"
+ },
+ {
+ "label": "12.2.1.4-ol8",
+ "description": "12cR2 (12.2.1.4) on JDK 8 on Oracle Linux 8",
+ "value": "12.2.1.4-ol8"
+ },
+ {
+ "label": "Others",
+ "description": "Specify fully qualified Oracle Container Registry tag",
+ "value": "others"
+ }
+ ],
+ "required": true
+ },
+ "visible": true
+ },
+ {
+ "name": "fromOracleImage",
"type": "Microsoft.Common.TextBox",
+ "visible": "[equals(steps('section_aks').imageInfo.oracleImageSelector, 'others')]",
"label": "WebLogic Docker tag",
- "defaultValue": "12.2.1.4-ol8",
- "toolTip": "Docker tag that comes after 'container-registry.oracle.com/middleware/weblogic:' in the fromImage option to 'imagetool'.",
- "multiLine": false,
+ "defaultValue": "14.1.1.0-slim-11",
+ "toolTip": "Docker tag that comes after 'container-registry.oracle.com/middleware/weblogic:' in the fromOracleImage option to 'imagetool'.",
"constraints": {
"required": true,
"validations": [
@@ -476,6 +647,15 @@
}
]
}
+ },
+ {
+ "name": "unsupportedPlatformInfo1",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": "[and(contains(steps('section_aks').clusterInfo.nodeVMSizeSelector,'p'), contains(parse('[\"14.1.1.0-11\", \"14.1.1.0-8\", \"14.1.1.0-slim-11\", \"14.1.1.0-slim-8\", \"12.2.1.4\", \"12.2.1.4-slim\"]'),if(equals(steps('section_aks').imageInfo.oracleImageSelector, 'others'), steps('section_aks').imageInfo.fromOracleImage, steps('section_aks').imageInfo.oracleImageSelector)))]",
+ "options": {
+ "icon": "Error",
+ "text": "The selected image is not compatible with the ARM64 platform. Please choose a different image or select a different size from AKS -> Azure Kubernetes Service -> Node size. For more information, see the Azure documentation."
+ }
}
],
"visible": true
@@ -483,14 +663,14 @@
{
"name": "jeeAppInfo",
"type": "Microsoft.Common.Section",
- "label": "Java EE Application",
+ "label": "Application",
"elements": [
{
"name": "uploadAppPackage",
"type": "Microsoft.Common.OptionsGroup",
- "label": "Deploy your application package?",
- "defaultValue": "Yes",
- "toolTip": "Select 'Yes' to deploy your application, or select 'No' to deploy a default 'hello world' open liberty application.",
+ "label": "Deploy an application?",
+ "defaultValue": "No",
+ "toolTip": "Select 'Yes' to deploy your application.",
"constraints": {
"allowedValues": [
{
@@ -505,6 +685,15 @@
"required": true
}
},
+ {
+ "name": "appInfoBox",
+ "type": "Microsoft.Common.InfoBox",
+ "options": {
+ "icon": "Info",
+ "text": "You must select the application files from an Azure Storage Account. Follow these steps to upload your applications to an Azure Storage Account: create a Storage Account, create a container, and upload the application files."
+ },
+ "visible": "[bool(steps('section_aks').jeeAppInfo.uploadAppPackage)]"
+ },
{
"name": "appPackageUrl",
"type": "Microsoft.Common.FileUpload",
@@ -521,86 +710,193 @@
},
"visible": "[bool(steps('section_aks').jeeAppInfo.uploadAppPackage)]"
},
+ {
+ "name": "validateApplicationsInfo",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": false,
+ "options": {
+ "icon": "Info",
+ "text": "If checked, verify the deployed app reaches the ACTIVE state and fail the deployment if it does not. See the documentation link for more information.",
+ "uri": "https://aka.ms/wls-aks-deployment-state"
+ }
+ },
+ {
+ "name": "validateApplications",
+ "type": "Microsoft.Common.CheckBox",
+ "label": "Fail deployment if application does not become ACTIVE.",
+ "toolTip": "If checked, verify the deployed app reaches the ACTIVE state and fail the deployment if it does not. See the documentation link for more information.",
+ "visible": false
+ },
{
"name": "appReplicas",
"type": "Microsoft.Common.TextBox",
- "label": "Number of application replicas",
+ "label": "Number of WebLogic Managed Server replicas",
"defaultValue": "2",
- "toolTip": "The number of application replicas to deploy.",
+ "toolTip": "The number of WebLogic Managed Server replicas to deploy.",
"constraints": {
"required": true,
- "regex": "^(1|2|3|4|5)$",
- "validationMessage": "Number of application replicas to deploy, limit 1-5."
+ "regex": "^(1|2|3|4|5){1}$",
+ "validationMessage": "Number of WebLogic Managed Server replicas to deploy, limit 1-5."
}
}
],
"visible": true
- }
- ]
- },
- {
- "name": "section_sslConfiguration",
- "type": "Microsoft.Common.Section",
- "label": "TLS/SSL Configuration",
- "elements": [
- {
- "name": "sslConfigurationText",
- "type": "Microsoft.Common.TextBlock",
- "visible": true,
- "options": {
- "text": "Selecting 'Yes' here will cause the template to provision WebLogic Administration Console, Remote Console, cluster and custom T3 channel on HTTPS (Secure) port, with your own TLS/SSL certificate.",
- "link": {
- "label": "Learn more",
- "uri": "https://aka.ms/arm-oraclelinux-wls-ssl-config"
- }
- }
},
{
- "name": "enableCustomSSL",
+ "name": "aksAdvancedConfig",
"type": "Microsoft.Common.OptionsGroup",
- "label": "Configure end to end TLS/SSL to WebLogic Administration Console and cluster on HTTPS (Secure) port, with your own certificate?",
+ "label": "Show advanced configuration?",
"defaultValue": "No",
- "toolTip": "Select 'Yes' to configure end to end TLS/SSL to WebLogic Administration Console and cluster on HTTPS (Secure) port, with your own certificate.",
+ "toolTip": "Select 'Yes' to edit advanced configuration.",
"constraints": {
"allowedValues": [
{
"label": "Yes",
- "value": true
+ "value": "true"
},
{
"label": "No",
- "value": false
+ "value": "false"
}
],
- "required": false
+ "required": true
}
},
{
- "name": "sslConfigurationAccessOption",
- "type": "Microsoft.Common.OptionsGroup",
- "visible": "[steps('section_sslConfiguration').enableCustomSSL]",
- "label": "How would you like to provide required configuration",
- "defaultValue": "Upload existing KeyStores",
- "toolTip": "Select 'Upload existing KeyStores' to use local stored KeyStores.",
- "constraints": {
- "allowedValues": [
- {
- "label": "Upload existing KeyStores",
- "value": "uploadConfig"
- },
- {
- "label": "Use KeyStores stored in Azure Key Vault",
- "value": "keyVaultStoredConfig"
- }
- ],
- "required": false
+ "name": "aksAdvancedInfo",
+ "type": "Microsoft.Common.Section",
+ "label": "Advanced",
+ "elements": [
+ {
+ "name": "enableAzureMonitoringTextBlock",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]",
+ "options": {
+ "icon": "Info",
+ "text": "If checked, configure the necessary settings to integrate with Container insights. Container insights gives you performance visibility by collecting memory and processor metrics from controllers, nodes, and containers that are available in Kubernetes through the Metrics API. Container logs are also collected. Metrics are written to the metrics store and log data is written to the logs store associated with your Log Analytics workspace.",
+ "link": {
+ "label": "Learn more",
+ "uri": "https://aka.ms/wls-aks-container-insights"
+ }
+ }
+ },
+ {
+ "name": "enableAzureMonitoring",
+ "type": "Microsoft.Common.CheckBox",
+ "label": "Enable Container insights",
+ "toolTip": "If checked, configure the necessary settings to integrate with Container insights.",
+ "visible": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]"
+ },
+ {
+ "name": "enableAzureFileShareTextBlock",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]",
+ "options": {
+ "icon": "Info",
+ "text": "If checked, configure the necessary settings to mount a persistent volume to the nodes of the AKS cluster. This can be useful for storing log files outside of the AKS cluster, among other possible uses. An Azure Storage Account and an Azure Files share will be provisioned; static persistent volume with the Azure Files share will be mounted to the nodes of the AKS cluster.",
+ "link": {
+ "label": "Learn more",
+ "uri": "https://aka.ms/wls-aks-persistent-storage"
+ }
+ }
+ },
+ {
+ "name": "enableAzureFileShare",
+ "toolTip": "If checked, configure the necessary settings to mount a persistent volume to the nodes of the AKS cluster.",
+ "type": "Microsoft.Common.CheckBox",
+ "label": "Create Persistent Volume using Azure File share service",
+ "visible": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]"
+ },
+ {
+ "name": "useAcrImage",
+ "type": "Microsoft.Common.CheckBox",
+ "label": "Bring your own WebLogic Server Docker image from Azure Container Registry?",
+ "toolTip": "Select 'Yes' to use a pre-existing Docker image, assumed to be a compatible WebLogic Server image, from the specified ACR instance. This allows the use of custom images, such as with a specific set of patches (PSUs)."
+ },
+ {
+ "name": "userProvidedAcrInfo",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": "[bool(steps('section_aks').aksAdvancedInfo.useAcrImage)]",
+ "options": {
+ "icon": "Info",
+ "text": "Make sure the Azure Container Registry has enabled the admin user.",
+ "uri": "https://docs.microsoft.com/en-us/azure/container-registry/container-registry-authentication#admin-account"
+ }
+ },
+ {
+ "name": "userProvidedAcrSelector",
+ "type": "Microsoft.Solutions.ResourceSelector",
+ "label": "Select existing ACR instance",
+ "toolTip": "Select the existing ACR instance.",
+ "resourceType": "Microsoft.ContainerRegistry/registries",
+ "options": {
+ "filter": {
+ "subscription": "onBasics",
+ "location": "onBasics"
+ }
+ },
+ "visible": "[bool(steps('section_aks').aksAdvancedInfo.useAcrImage)]"
+ },
+ {
+ "name": "userProvidedImagePath",
+ "type": "Microsoft.Common.TextBox",
+ "visible": "[bool(steps('section_aks').aksAdvancedInfo.useAcrImage)]",
+ "label": "Please provide the image path",
+ "toolTip": "Please provide the image path, the image must be stored in the selected ACR above",
+ "defaultValue": "",
+ "constraints": {
+ "required": true,
+ "regex": "[concat(coalesce(last(split(steps('section_aks').aksAdvancedInfo.userProvidedAcrSelector.id, '/')), ''), '.*$')]",
+ "validationMessage": "The image must be stored in the selected ACR above"
+ }
+ }
+ ],
+ "visible": "[bool(steps('section_aks').aksAdvancedConfig)]"
+ }
+ ]
+ },
+ {
+ "name": "section_sslConfiguration",
+ "type": "Microsoft.Common.Section",
+ "label": "TLS/SSL",
+ "elements": [
+ {
+ "name": "sslConfigurationText",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": true,
+ "options": {
+ "text": "Selecting 'Yes' here will cause the offer to configure WebLogic Server Administration Console, Remote Console and cluster to use HTTPS (Secure) ports, with your own TLS/SSL certificate.",
+ "link": {
+ "label": "Learn more",
+ "uri": "https://aka.ms/arm-oraclelinux-wls-ssl-config"
+ }
}
},
+ {
+ "name": "enableCustomSSL",
+ "type": "Microsoft.Common.OptionsGroup",
+ "label": "Configure end to end TLS/SSL to WebLogic Server Administration Console and cluster on HTTPS (Secure) port, with your own certificate?",
+ "defaultValue": "No",
+ "toolTip": "Select 'Yes' to configure end to end TLS/SSL to WebLogic Server Administration Console and cluster on HTTPS (Secure) port, with your own certificate.",
+ "constraints": {
+ "allowedValues": [
+ {
+ "label": "Yes",
+ "value": true
+ },
+ {
+ "label": "No",
+ "value": false
+ }
+ ],
+ "required": false
+ }
+ },
{
"name": "uploadedCustomSSLSettings",
"type": "Microsoft.Common.Section",
- "visible": "[and(steps('section_sslConfiguration').enableCustomSSL, equals(steps('section_sslConfiguration').sslConfigurationAccessOption, 'uploadConfig'))]",
- "label": "TLS/SSL Configuration Settings",
+ "visible": "[steps('section_sslConfiguration').enableCustomSSL]",
+ "label": "TLS/SSL configuration settings",
"elements": [
{
"name": "sslKeystoreInfo0",
@@ -754,218 +1050,254 @@
}
}
]
+ }
+ ]
+ },
+ {
+ "name": "section_appGateway",
+ "type": "Microsoft.Common.Section",
+ "label": "Load balancing",
+ "subLabel": {
+ "preValidation": "Provide required information for load balancing",
+ "postValidation": "Done"
+ },
+ "bladeTitle": "Load balancing",
+ "elements": [
+ {
+ "name": "connectToAGText",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": true,
+ "options": {
+ "text": "This blade allows configuring options for load balancing and ingress controller."
+ }
+ },
+ {
+ "name": "loadBalancingOptions",
+ "type": "Microsoft.Common.OptionsGroup",
+ "label": "Load Balancing Options",
+ "defaultValue": "Application Gateway Ingress Controller",
+ "toolTip": "Select load balancing option.",
+ "constraints": {
+ "allowedValues": [
+ {
+ "label": "Application Gateway Ingress Controller",
+ "value": "agic"
+ },
+ {
+ "label": "Standard Load Balancer Service",
+ "value": "lbservice"
+ },
+ {
+ "label": "No Load Balancer",
+ "value": "none"
+ }
+ ],
+ "required": true
+ },
+ "visible": true
},
{
- "name": "keyVaultStoredCustomSSLSettings",
+ "name": "appgwIngress",
"type": "Microsoft.Common.Section",
- "visible": "[and(steps('section_sslConfiguration').enableCustomSSL, equals(steps('section_sslConfiguration').sslConfigurationAccessOption, 'keyVaultStoredConfig'))]",
- "label": "TLS/SSL Configuration Settings",
+ "label": "Application Gateway Ingress Controller",
"elements": [
{
- "name": "sslKeystoreInfo1",
- "type": "Microsoft.Common.InfoBox",
- "visible": "true",
- "options": {
- "icon": "Info",
- "text": "You must provide different files for identity and trust KeyStores. Select here for more details.",
- "uri": "https://aka.ms/arm-oraclelinux-wls-ssl-configuration"
- }
- },
- {
- "name": "keyVaultText",
+ "name": "enableAppGatewayText",
"type": "Microsoft.Common.TextBlock",
- "visible": "true",
+ "visible": true,
"options": {
- "text": "Enabling a HTTPS (Secure) port for the Administration Console requires you to obtain a valid TLS/SSL certificate. The template will look for the certificate and other configuration items in the Azure Key Vault specified here.",
+ "text": "This option will cause the offer to configure an Application Gateway Ingress Controller",
"link": {
"label": "Learn more",
- "uri": "https://aka.ms/arm-oraclelinux-wls-cluster-app-gateway-key-vault"
+ "uri": "https://aka.ms/wls-aks-app-gateway-ingress-controller"
}
}
},
{
- "name": "keyVaultResourceGroup",
- "type": "Microsoft.Common.TextBox",
- "visible": "true",
- "label": "Resource group name in current subscription containing the Key Vault",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
- "constraints": {
- "required": true,
- "regex": "^[a-z0-9A-Z.\\-_()]{0,89}([a-z0-9A-Z\\-_()]{1})$",
- "validationMessage": "[if(greater(length(steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultResourceGroup), 90),'Resource group names only allow up to 90 characters.', 'Resource group names only allow alphanumeric characters, periods, underscores, hyphens and parenthesis and cannot end in a period.')]"
+ "name": "vnetInfo",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": true,
+ "options": {
+ "icon": "Info",
+ "text": "When creating a new virtual network, the subnet's address prefix is calculated automatically based on the virtual network's address prefix. When using an existing virtual network, a minimum virtual network size of /24 and a minimum subnet size of /24 are required. Additionally, the subnet must be dedicated only for use by the Application Gateway."
}
},
{
- "name": "keyVaultName",
- "type": "Microsoft.Common.TextBox",
- "visible": "true",
- "label": "Name of the Azure Key Vault containing secrets for the TLS/SSL certificate",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
+ "name": "vnetForApplicationGateway",
+ "type": "Microsoft.Network.VirtualNetworkCombo",
+ "label": {
+ "virtualNetwork": "Virtual network",
+ "subnets": "Subnets"
+ },
+ "toolTip": {
+ "virtualNetwork": "Select a virtual network in which to place the Application Gateway.",
+ "subnets": "The subnet must be dedicated only for use by the Application Gateway."
+ },
+ "defaultValue": {
+ "name": "[concat('wlsaks-vnet',take(guid(), 8))]",
+ "addressPrefixSize": "/24"
+ },
"constraints": {
- "required": true,
- "regex": "^(?=.{3,24}$)[a-zA-Z](?!.*--)[a-zA-Z0-9-]*[a-zA-Z0-9]$",
- "validationMessage": "[if(or(greater(length(steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultName), 24), less(length(steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultName), 3)),'Vault name must be between 3-24 alphanumeric characters. The name must begin with a letter, end with a letter or digit, and not contain consecutive hyphens.','Vault name must only contain alphanumeric characters and dashes and cannot start with a number')]"
+ "minAddressPrefixSize": "/24"
+ },
+ "options": {
+ "hideExisting": false
+ },
+ "subnets": {
+ "gatewaySubnet": {
+ "label": "Subnet",
+ "defaultValue": {
+ "name": "wls-aks-gateway-subnet",
+ "addressPrefixSize": "/24"
+ },
+ "constraints": {
+ "minAddressPrefixSize": "/24",
+ "minAddressCount": 250,
+ "requireContiguousAddresses": false
+ }
+ }
+ },
+ "visible": true
+ },
+ {
+ "name": "appgwUsePrivateIP",
+ "type": "Microsoft.Common.CheckBox",
+ "label": "Configure frontend IP with private IP address",
+ "toolTip": "If checked, expose WebLogic Server with private IP address.",
+ "visible": false
+ },
+ {
+ "name": "sslCertText00",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": true,
+ "options": {
+ "text": "Choose an option for providing the TLS/SSL certificate:"
}
},
{
- "name": "keyVaultCustomIdentityKeyStoreDataSecretName",
- "type": "Microsoft.Common.TextBox",
- "visible": "true",
- "label": "The name of the secret in the specified Key Vault whose value is the Identity KeyStore Data",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
- "constraints": {
- "required": true,
- "regex": "^[a-z0-9A-Z]{1,30}$",
- "validationMessage": "The value must be 1-30 characters long and must only contain letters and numbers."
+ "name": "sslCertText01",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": true,
+ "options": {
+ "text": " ⁃ Upload a TLS/SSL certificate: Upload the pre-signed certificate now."
}
},
{
- "name": "keyVaultCustomIdentityKeyStorePassPhraseSecretName",
- "type": "Microsoft.Common.TextBox",
- "visible": "true",
- "label": "The name of the secret in the specified Key Vault whose value is the passphrase for the Identity KeyStore",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
- "constraints": {
- "required": true,
- "regex": "^[a-z0-9A-Z]{1,30}$",
- "validationMessage": "The value must be 1-30 characters long and must only contain letters and numbers."
+ "name": "sslCertText02",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": true,
+ "options": {
+ "text": " ⁃ Generate a self-signed front-end certificate: Generate a self-signed front-end certificate and apply it during deployment.",
+ "link": {
+ "label": "Learn more",
+ "uri": "https://aka.ms/wls-aks-application-gateway-ssl-key-vault"
+ }
}
},
{
- "name": "keyVaultCustomIdentityKeyStoreType",
- "type": "Microsoft.Common.DropDown",
- "visible": "true",
- "label": "The Identity KeyStore type (JKS,PKCS12)",
- "defaultValue": "JKS",
- "toolTip": "One of the supported KeyStore types",
+ "name": "certificateOption",
+ "type": "Microsoft.Common.OptionsGroup",
+ "label": "Select desired TLS/SSL certificate option",
+ "defaultValue": "Generate a self-signed front-end certificate",
+ "toolTip": "Select desired TLS/SSL certificate option",
"constraints": {
"allowedValues": [
{
- "label": "JKS",
- "value": "JKS"
+ "label": "Generate a self-signed front-end certificate",
+ "value": "generateCert"
},
{
- "label": "PKCS12",
- "value": "PKCS12"
+ "label": "Upload a TLS/SSL certificate",
+ "value": "haveCert"
}
],
"required": true
- }
+ },
+ "visible": true
},
{
- "name": "keyVaultPrivateKeyAliasSecretName",
- "type": "Microsoft.Common.TextBox",
- "visible": "true",
- "label": "The name of the secret in the specified Key Vault whose value is the Private Key Alias",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
+ "name": "appGatewaySSLCertData",
+ "type": "Microsoft.Common.FileUpload",
+ "label": "Front-End TLS/SSL certificate(.pfx)",
+ "toolTip": "TLS/SSL certificate used for App Gateway",
"constraints": {
"required": true,
- "regex": "^[a-z0-9A-Z]{1,30}$",
- "validationMessage": "The value must be 1-30 characters long and must only contain letters and numbers."
- }
+ "accept": ".pfx"
+ },
+ "options": {
+ "multiple": false,
+ "uploadMode": "file",
+ "openMode": "binary"
+ },
+ "visible": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveCert')]"
},
{
- "name": "keyVaultPrivateKeyPassPhraseSecretName",
- "type": "Microsoft.Common.TextBox",
- "visible": "true",
- "label": "The name of the secret in the specified Key Vault whose value is the passphrase for the Private Key",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
+ "name": "appGatewaySSLCertPassword",
+ "type": "Microsoft.Common.PasswordBox",
+ "label": {
+ "password": "Password",
+ "confirmPassword": "Confirm password"
+ },
+ "toolTip": "Front-End TLS/SSL certificate password",
"constraints": {
- "required": true,
- "regex": "^[a-z0-9A-Z]{1,30}$",
- "validationMessage": "The value must be 1-30 characters long and must only contain letters and numbers."
- }
+ "required": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveCert')]",
+ "regex": "^((?=.*[0-9])(?=.*[a-z])|(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])|(?=.*[0-9])(?=.*[a-z])(?=.*[!@#$%^&*])|(?=.*[0-9])(?=.*[A-Z])(?=.*[!@#$%^&*])|(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*])).{6,128}$",
+ "validationMessage": "The password must contain at least 6 characters, with at least 1 uppercase letter, 1 lowercase letter and 1 number."
+ },
+ "options": {
+ "hideConfirmation": false
+ },
+ "visible": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveCert')]"
},
{
- "name": "keyVaultCustomTrustKeyStoreDataSecretName",
- "type": "Microsoft.Common.TextBox",
- "visible": "true",
- "label": "The name of the secret in the specified Key Vault whose value is the Trust KeyStore Data",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
+ "name": "uploadedSSLCertData",
+ "type": "Microsoft.Common.FileUpload",
+ "label": "Trusted root certificate(.cer, .cert)",
+ "toolTip": "Trusted root certificate (CA certificate) used to set up end to end TLS/SSL",
"constraints": {
"required": true,
- "regex": "^[a-z0-9A-Z]{1,30}$",
- "validationMessage": "The value must be 1-30 characters long and must only contain letters and numbers."
- }
+ "accept": ".cer, .cert"
+ },
+ "options": {
+ "multiple": false,
+ "uploadMode": "file",
+ "openMode": "binary"
+ },
+ "visible": "[steps('section_sslConfiguration').enableCustomSSL]"
},
{
- "name": "keyVaultCustomTrustKeyStorePassPhraseSecretName",
- "type": "Microsoft.Common.TextBox",
- "visible": "true",
- "label": "The name of the secret in the specified Key Vault whose value is the passphrase for the Trust KeyStore",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
- "constraints": {
- "required": true,
- "regex": "^[a-z0-9A-Z]{1,30}$",
- "validationMessage": "The value must be 1-30 characters long and must only contain letters and numbers."
- }
+ "name": "enableCookieBasedAffinity",
+ "type": "Microsoft.Common.CheckBox",
+ "label": "Disable cookie based affinity",
+ "toolTip": "If checked, disable cookie based affinity",
+ "visible": true
},
{
- "name": "keyVaultCustomTrustKeyStoreType",
- "type": "Microsoft.Common.DropDown",
- "visible": "true",
- "label": "The Trust KeyStore type (JKS,PKCS12)",
- "defaultValue": "JKS",
- "toolTip": "One of the supported KeyStore types",
+ "name": "appgwForAdminServer",
+ "type": "Microsoft.Common.OptionsGroup",
+ "label": "Create an ingress for the Administration Console. Make sure no application uses the path /console*; it will conflict with the Administration Console path.",
+ "defaultValue": "No",
+ "toolTip": "Select 'Yes' to create an ingress for the Administration Console. Make sure no application uses the path /console*; it will conflict with the Administration Console path.",
"constraints": {
"allowedValues": [
{
- "label": "JKS",
- "value": "JKS"
+ "label": "Yes",
+ "value": true
},
{
- "label": "PKCS12",
- "value": "PKCS12"
+ "label": "No",
+ "value": false
}
- ],
- "required": true
- }
- }
- ]
- }
- ]
- },
- {
- "name": "section_appGateway",
- "type": "Microsoft.Common.Section",
- "label": "Networking",
- "subLabel": {
- "preValidation": "Provide required information for networking",
- "postValidation": "Done"
- },
- "bladeTitle": "Networking",
- "elements": [
- {
- "name": "connectToAGText",
- "type": "Microsoft.Common.TextBlock",
- "visible": true,
- "options": {
- "text": "Selecting 'Yes' here will cause the template to provision Load Balancer service or Ingress service for WebLogic Administration Console and WebLogic cluster.",
- "link": {
- "label": "Learn more",
- "uri": "https://aka.ms/arm-oraclelinux-wls-cluster-app-gateway-overview"
- }
- }
- },
- {
- "name": "lbSVCInfo",
- "type": "Microsoft.Common.Section",
- "label": "Standard Load Balancer service",
- "elements": [
+ ]
+ },
+ "visible": true
+ },
{
- "name": "enableLBSVC",
+ "name": "appgwForAdminRemote",
"type": "Microsoft.Common.OptionsGroup",
- "label": "Create Standard Load Balancer services for the cluster?",
+ "label": "Create an ingress for the WebLogic Remote Console. Make sure no application uses the path /remoteconsole*; it will conflict with the WebLogic Remote Console path.",
"defaultValue": "No",
- "toolTip": "Select 'Yes' to create Standard Load Balancer services for the cluster.",
+ "toolTip": "Select 'Yes' to create an ingress for the WebLogic Remote Console. Make sure no application uses the path /remoteconsole*; it will conflict with the WebLogic Remote Console path.",
"constraints": {
"allowedValues": [
{
@@ -976,28 +1308,71 @@
"label": "No",
"value": false
}
- ],
- "required": false
+ ]
+ },
+ "visible": true
+ }
+ ],
+ "visible": "[equals(steps('section_appGateway').loadBalancingOptions, 'agic')]"
+ },
+ {
+ "name": "lbSVCInfo",
+ "type": "Microsoft.Common.Section",
+ "label": "Standard Load Balancer Service",
+ "elements": [
+ {
+ "name": "enableLBSVCText",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": true,
+ "options": {
+ "text": "This option will cause the offer to provision the Azure Load Balancer service to expose the WebLogic Server Administration Console and the cluster.",
+ "link": {
+ "label": "Learn more",
+ "uri": "https://aka.ms/wls-aks-standard-load-balancer"
+ }
+ }
+ },
+ {
+ "name": "enableInternalLBText",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": true,
+ "options": {
+ "text": "If checked, the offer will configure the load balancer as an internal load balancer.",
+ "link": {
+ "label": "Learn more",
+ "uri": "https://aka.ms/wls-aks-internal-load-balancer"
+ }
}
},
{
"name": "enableInternalLB",
"type": "Microsoft.Common.CheckBox",
"label": "Use Internal Load Balancer",
- "visible": "[steps('section_appGateway').lbSVCInfo.enableLBSVC]"
+ "toolTip": "If checked, the offer will configure the load balancer as an internal load balancer.",
+ "visible": true
+ },
+ {
+ "name": "infoBoxForLbSvc",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": true,
+ "options": {
+ "icon": "info",
+ "text": "There must be at least one and at most two entries."
+ }
},
{
"name": "lbSVC",
"type": "Microsoft.Common.EditableGrid",
"ariaLabel": "Enter information",
"label": "Standard Load Balancer service",
- "visible": "[steps('section_appGateway').lbSVCInfo.enableLBSVC]",
+ "toolTip": "Create Azure Standard Load Balancer service for Administration Console and cluster.",
+ "visible": true,
"constraints": {
"width": "Full",
"rows": {
"count": {
- "min": 0,
- "max": 4
+ "min": 1,
+ "max": 2
}
},
"columns": [
@@ -1007,7 +1382,8 @@
"width": "2fr",
"element": {
"type": "Microsoft.Common.TextBox",
- "placeholder": "domain1-admin-server",
+ "placeholder": "Input a prefix...",
+ "toolTip": "Input a prefix for the service name. For example, inputting 'domain1-admin-server' will provision a Load Balancer service named 'domain1-admin-server*' and expose the Oracle WebLogic Administration Server to the Internet.",
"constraints": {
"required": true,
"validations": [
@@ -1021,346 +1397,73 @@
}
]
}
- }
- },
- {
- "id": "colTarget",
- "header": "Target",
- "width": "2fr",
- "element": {
- "name": "dropDownTargets",
- "type": "Microsoft.Common.DropDown",
- "placeholder": "admin-server",
- "constraints": {
- "allowedValues": [
- {
- "label": "admin-server",
- "value": "adminServer"
- },
- {
- "label": "admin-server-t3",
- "value": "adminServerT3"
- },
- {
- "label": "cluster-1",
- "value": "cluster1"
- },
- {
- "label": "cluster-1-t3",
- "value": "cluster1T3"
- }
- ],
- "required": true
- }
- }
- },
- {
- "id": "colPort",
- "header": "Port",
- "width": "1fr",
- "element": {
- "type": "Microsoft.Common.TextBox",
- "placeholder": "7001",
- "constraints": {
- "required": true,
- "validations": [
- {
- "isValid": "[lessOrEquals(length(filter(steps('section_appGateway').lbSVCInfo.lbSVC, (item) => equals(item.colTarget, last(take(steps('section_appGateway').lbSVCInfo.lbSVC, $rowIndex)).colTarget))),1)]",
- "message": "You can not select the same target repeadly."
- },
- {
- "regex": "^()([1-9]|[1-5]?[0-9]{2,4}|6[1-4][0-9]{3}|65[1-4][0-9]{2}|655[1-2][0-9]|6553[1-5])$",
- "message": "Only numbers are allowed, and the value must be 1-65535."
- }
- ]
- }
- }
- }
- ]
- }
- }
- ],
- "visible": true
- },
- {
- "name": "appgwIngress",
- "type": "Microsoft.Common.Section",
- "label": "Application Gateway Ingress",
- "elements": [
- {
- "name": "enableAppGateway",
- "type": "Microsoft.Common.OptionsGroup",
- "label": "Connect to Azure Application Gateway?",
- "defaultValue": "No",
- "toolTip": "Select 'Yes' to create an Azure Application Gateway Ingress as the load balancer for the cluster and admin server.",
- "constraints": {
- "allowedValues": [
- {
- "label": "Yes",
- "value": true
- },
- {
- "label": "No",
- "value": false
- }
- ],
- "required": false
- }
- },
- {
- "name": "keyVaultText00",
- "type": "Microsoft.Common.TextBlock",
- "visible": "[steps('section_appGateway').appgwIngress.enableAppGateway]",
- "options": {
- "text": "Choose an option for providing the TLS/SSL certificate:"
- }
- },
- {
- "name": "keyVaultText01",
- "type": "Microsoft.Common.TextBlock",
- "visible": "[steps('section_appGateway').appgwIngress.enableAppGateway]",
- "options": {
- "text": " ⁃ Upload a TLS/SSL certificate: Upload the pre-signed certificate now."
- }
- },
- {
- "name": "keyVaultText02",
- "type": "Microsoft.Common.TextBlock",
- "visible": "[steps('section_appGateway').appgwIngress.enableAppGateway]",
- "options": {
- "text": " ⁃ Identify an Azure Key Vault: The Key Vault must already contain the certificate and its password stored as secrets."
- }
- },
- {
- "name": "keyVaultText03",
- "type": "Microsoft.Common.TextBlock",
- "visible": "[steps('section_appGateway').appgwIngress.enableAppGateway]",
- "options": {
- "text": " ⁃ Generate a self-signed frontend certificate: generate a self-signed frontend certificate and apply it during deployment.",
- "link": {
- "label": "Learn more",
- "uri": "https://aka.ms/arm-oraclelinux-wls-cluster-app-gateway-key-vault"
- }
- }
- },
- {
- "name": "certificateOption",
- "type": "Microsoft.Common.OptionsGroup",
- "label": "Select desired TLS/SSL certificate option",
- "defaultValue": "Upload a TLS/SSL certificate",
- "toolTip": "Select desired TLS/SSL certificate option",
- "constraints": {
- "allowedValues": [
- {
- "label": "Upload a TLS/SSL certificate",
- "value": "haveCert"
- },
- {
- "label": "Identify an Azure Key Vault",
- "value": "haveKeyVault"
- },
- {
- "label": "Generate a self-signed frontend certificate",
- "value": "generateCert"
- }
- ],
- "required": true
- },
- "visible": "[steps('section_appGateway').appgwIngress.enableAppGateway]"
- },
- {
- "name": "keyVaultSSLCertData",
- "type": "Microsoft.Common.FileUpload",
- "label": "Frontend TLS/SSL certificate(.pfx)",
- "toolTip": "TLS/SSL certificate used for App Gateway",
- "constraints": {
- "required": true,
- "accept": ".pfx"
- },
- "options": {
- "multiple": false,
- "uploadMode": "file",
- "openMode": "binary"
- },
- "visible": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveCert')]"
- },
- {
- "name": "appGatewaySSLCertPassword",
- "type": "Microsoft.Common.PasswordBox",
- "label": {
- "password": "Password",
- "confirmPassword": "Confirm password"
- },
- "toolTip": "Frontend TLS/SSL certificate password",
- "constraints": {
- "required": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveCert')]",
- "regex": "^((?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*])).{6,128}$",
- "validationMessage": "The password must contain at least 6 characters, with at least 1 uppercase letter, 1 lowercase letter and 1 number."
- },
- "options": {
- "hideConfirmation": false
- },
- "visible": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveCert')]"
- },
- {
- "name": "keyVaultBackendSSLCertData",
- "type": "Microsoft.Common.FileUpload",
- "label": "Trusted root certificate(.cer, .cert)",
- "toolTip": "Trusted root certificate (CA certificate) used to set up end to end TLS/SSL",
- "constraints": {
- "required": true,
- "accept": ".cer, .cert"
- },
- "options": {
- "multiple": false,
- "uploadMode": "file",
- "openMode": "binary"
- },
- "visible": "[and(steps('section_appGateway').appgwIngress.enableAppGateway, steps('section_sslConfiguration').enableCustomSSL, not(equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveKeyVault')))]"
- },
- {
- "name": "keyVaultResourceGroup",
- "type": "Microsoft.Common.TextBox",
- "label": "Resource group name in current subscription containing the Key Vault",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
- "constraints": {
- "required": true,
- "regex": "^[a-z0-9A-Z.\\-_()]{0,89}([a-z0-9A-Z\\-_()]{1})$",
- "validationMessage": "[if(greater(length(steps('section_appGateway').appgwIngress.keyVaultResourceGroup), 90),'Resource group names only allow up to 90 characters.', 'Resource group names only allow alphanumeric characters, periods, underscores, hyphens and parenthesis and cannot end in a period.')]"
- },
- "visible": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveKeyVault')]"
- },
- {
- "name": "keyVaultName",
- "type": "Microsoft.Common.TextBox",
- "label": "Name of the Azure Key Vault containing secrets for the certificate for TLS/SSL Termination",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
- "constraints": {
- "required": true,
- "regex": "^(?=.{3,24}$)[a-zA-Z](?!.*--)[a-zA-Z0-9-]*[a-zA-Z0-9]$",
- "validationMessage": "[if(or(greater(length(steps('section_appGateway').appgwIngress.keyVaultName), 24), less(length(steps('section_appGateway').appgwIngress.keyVaultName), 3)),'Vault name must be between 3-24 alphanumeric characters. The name must begin with a letter, end with a letter or digit, and not contain consecutive hyphens.','Vault name must only contain alphanumeric characters and dashes and cannot start with a number')]"
- },
- "visible": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveKeyVault')]"
- },
- {
- "name": "keyVaultSSLCertDataSecretName",
- "type": "Microsoft.Common.TextBox",
- "label": "The name of the secret in the specified Key Vault whose value is the frontend TLS/SSL certificate data",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
- "constraints": {
- "required": true,
- "regex": "^[a-z0-9A-Z]{1,30}$",
- "validationMessage": "The value must be 1-30 characters long and must only contain letters and numbers."
- },
- "visible": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveKeyVault')]"
- },
- {
- "name": "keyVaultSSLCertPasswordSecretName",
- "type": "Microsoft.Common.TextBox",
- "label": "The name of the secret in the specified Key Vault whose value is the password for the frontend TLS/SSL certificate",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
- "constraints": {
- "required": true,
- "regex": "^[a-z0-9A-Z]{1,30}$",
- "validationMessage": "The value must be 1-30 characters long and must only contain letters and numbers."
- },
- "visible": "[equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveKeyVault')]"
- },
- {
- "name": "keyVaultBackendSSLCertDataSecretName",
- "type": "Microsoft.Common.TextBox",
- "label": "The name of the secret in the specified Key Vault whose value is the trusted root certificate data",
- "defaultValue": "",
- "toolTip": "Use only letters and numbers",
- "constraints": {
- "required": true,
- "regex": "^[a-z0-9A-Z]{1,30}$",
- "validationMessage": "The value must be 1-30 characters long and must only contain letters and numbers."
- },
- "visible": "[and(steps('section_sslConfiguration').enableCustomSSL, equals(steps('section_appGateway').appgwIngress.certificateOption, 'haveKeyVault'))]"
- },
- {
- "name": "servicePrincipal",
- "type": "Microsoft.Common.PasswordBox",
- "label": {
- "password": "Service Principal",
- "confirmPassword": "Confirm password"
- },
- "toolTip": "Base64 encoded JSON blob of the service principal. You can generate one with command 'az ad sp create-for-rbac --sdk-auth | base64 -w0'",
- "constraints": {
- "required": true
- },
- "options": {
- "hideConfirmation": true
- },
- "visible": "[steps('section_appGateway').appgwIngress.enableAppGateway]"
- },
- {
- "name": "enableCookieBasedAffinity",
- "type": "Microsoft.Common.CheckBox",
- "label": "Enable cookie based affinity",
- "visible": "[steps('section_appGateway').appgwIngress.enableAppGateway]"
- },
- {
- "name": "appgwForAdminServer",
- "type": "Microsoft.Common.OptionsGroup",
- "label": "Create ingress for Administration Console. Make sure no application with path /console*, it will cause conflict with Administration Console path.",
- "defaultValue": "Yes",
- "toolTip": "Select 'Yes' to Create ingress for Administration Console. Make sure no application with path /console*, it will cause conflict with Administration Console path.",
- "constraints": {
- "allowedValues": [
- {
- "label": "Yes",
- "value": true
+ }
},
{
- "label": "No",
- "value": false
- }
- ]
- },
- "visible": "[steps('section_appGateway').appgwIngress.enableAppGateway]"
- },
- {
- "name": "appgwForAdminRemote",
- "type": "Microsoft.Common.OptionsGroup",
- "label": "Create ingress for WebLogic Remote Console. Make sure no application with path /remoteconsole*, it will cause conflict with WebLogic Remote Console path.",
- "defaultValue": "Yes",
- "toolTip": "Select 'Yes' to Create ingress for WebLogic Remote Console. Make sure no application with path /remoteconsole*, it will cause conflict with WebLogic Remote Console path.",
- "constraints": {
- "allowedValues": [
- {
- "label": "Yes",
- "value": true
+ "id": "colTarget",
+ "header": "Target",
+ "width": "2fr",
+ "element": {
+ "type": "Microsoft.Common.DropDown",
+ "placeholder": "Select a target...",
+ "toolTip": "Create Azure Standard Load Balancer Service for the selected target.",
+ "constraints": {
+ "allowedValues": [
+ {
+ "label": "admin-server",
+ "value": "adminServer"
+ },
+ {
+ "label": "cluster-1",
+ "value": "cluster1"
+ }
+ ],
+ "required": true
+ }
+ }
},
{
- "label": "No",
- "value": false
+ "id": "colPort",
+ "header": "Port",
+ "width": "1fr",
+ "element": {
+ "type": "Microsoft.Common.TextBox",
+ "placeholder": "Input a port...",
+ "toolTip": "Public port for the target service, it's suggested to use 7001 for Oracle WebLogic Administration Server, and 8001 for cluster.",
+ "constraints": {
+ "required": true,
+ "validations": [
+ {
+ "isValid": "[lessOrEquals(length(filter(steps('section_appGateway').lbSVCInfo.lbSVC, (item) => equals(item.colTarget, last(take(steps('section_appGateway').lbSVCInfo.lbSVC, $rowIndex)).colTarget))),1)]",
+ "message": "You can not select the same target repeatedly."
+ },
+ {
+ "regex": "^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$",
+ "message": "Only numbers are allowed, and the value must be 1-65535."
+ }
+ ]
+ }
+ }
}
]
- },
- "visible": "[steps('section_appGateway').appgwIngress.enableAppGateway]"
+ }
}
],
- "visible": true
+ "visible": "[equals(steps('section_appGateway').loadBalancingOptions, 'lbservice')]"
}
]
},
{
"name": "section_dnsConfiguration",
"type": "Microsoft.Common.Section",
- "label": "DNS Configuration",
+ "label": "DNS",
"elements": [
{
"name": "dnsConfigurationText",
"type": "Microsoft.Common.TextBlock",
"visible": true,
"options": {
- "text": "Selecting 'Yes' here will cause the template to provision Oracle WebLogic Server Administration Console, cluster, Remote Console and custom T3 channel using custom DNS Name (example: admin.contoso.com)",
+ "text": "Selecting 'Yes' here will cause the offer to provision Oracle WebLogic Server Administration Console, cluster, and Remote Console using custom DNS Name (example: admin.contoso.com)",
"link": {
"label": "Learn more",
"uri": "https://aka.ms/arm-oraclelinux-wls-dns"
@@ -1370,7 +1473,7 @@
{
"name": "enableDNSConfiguration",
"type": "Microsoft.Common.OptionsGroup",
- "label": "Configure Custom DNS Alias",
+ "label": "Custom DNS Alias",
"defaultValue": "No",
"toolTip": "Select 'Yes' to configure Custom DNS Alias.",
"constraints": {
@@ -1428,7 +1531,7 @@
"toolTip": "Name of the resource group which contains the DNS Zone in current subscription",
"constraints": {
"required": true,
- "regex": "^[a-z0-9A-Z.\\-_()]{0,89}([a-z0-9A-Z\\-_()]{1})$",
+ "regex": "^[a-z0-9A-Z.\\-_()]{0,89}([a-z0-9A-Z\\-_()]{1})$",
"validationMessage": "[if(greater(length(steps('section_dnsConfiguration').dnsZoneResourceGroup), 90),'Resource group names only allow up to 90 characters.', 'Resource group names only allow alphanumeric characters, periods, underscores, hyphens and parenthesis and cannot end in a period.')]"
},
"visible": "[and(steps('section_dnsConfiguration').enableDNSConfiguration,steps('section_dnsConfiguration').bringDNSZone)]"
@@ -1436,9 +1539,9 @@
{
"name": "dnszoneAdminConsoleLabel",
"type": "Microsoft.Common.TextBox",
- "label": "Label for Oracle WebLogic Administration Console",
+ "label": "Label for Oracle WebLogic Server Administration Console",
"defaultValue": "admin",
- "toolTip": "Specify a label to generate subdomain of Oracle WebLogic Administration Console",
+ "toolTip": "Specify a label to generate subdomain of Oracle WebLogic Server Administration Console",
"constraints": {
"required": true,
"validations": [
@@ -1452,14 +1555,14 @@
}
]
},
- "visible": "[steps('section_dnsConfiguration').enableDNSConfiguration]"
+ "visible": "[and(steps('section_dnsConfiguration').enableDNSConfiguration, not(equals(steps('section_appGateway').loadBalancingOptions,'none')))]"
},
{
"name": "dnszoneAdminT3ChannelLabel",
"type": "Microsoft.Common.TextBox",
- "label": "Label for Oracle WebLogic Admin Server T3 channel",
+ "label": "Label for Oracle WebLogic Administration Server T3 channel",
"defaultValue": "admin-t3",
- "toolTip": "Specify a label to generate subdomain of Oracle WebLogic Admin Server T3 channel",
+ "toolTip": "Specify a label to generate subdomain of Oracle WebLogic Administration Server T3 channel",
"constraints": {
"required": true,
"validations": [
@@ -1473,7 +1576,7 @@
}
]
},
- "visible": "[and(steps('section_dnsConfiguration').enableDNSConfiguration,basics('basicsOptional').enableAdminT3Tunneling, steps('section_appGateway').lbSVCInfo.enableLBSVC)]"
+ "visible": "[and(steps('section_dnsConfiguration').enableDNSConfiguration,basics('basicsOptional').enableAdminT3Tunneling, equals(steps('section_appGateway').loadBalancingOptions,'lbservice'))]"
},
{
"name": "dnszoneGatewayLabel",
@@ -1494,7 +1597,7 @@
}
]
},
- "visible": "[steps('section_dnsConfiguration').enableDNSConfiguration]"
+ "visible": "[and(steps('section_dnsConfiguration').enableDNSConfiguration, not(equals(steps('section_appGateway').loadBalancingOptions,'none')))]"
},
{
"name": "dnszoneClusterT3ChannelLabel",
@@ -1515,7 +1618,7 @@
}
]
},
- "visible": "[and(steps('section_dnsConfiguration').enableDNSConfiguration,basics('basicsOptional').enableClusterT3Tunneling, steps('section_appGateway').lbSVCInfo.enableLBSVC)]"
+ "visible": "[and(steps('section_dnsConfiguration').enableDNSConfiguration,basics('basicsOptional').enableClusterT3Tunneling, equals(steps('section_appGateway').loadBalancingOptions,'lbservice'))]"
}
]
},
@@ -1534,7 +1637,7 @@
"type": "Microsoft.Common.TextBlock",
"visible": true,
"options": {
- "text": "Selecting 'Yes' here and providing the configuration will cause the template to configure the WebLogic Server to connect to the desired pre-existing database. The database must be network accessible to the VNET and subnets created by the template."
+ "text": "Selecting 'Yes' here and providing the configuration will cause the offer to configure the WebLogic Server to connect to the desired pre-existing database. The database must be network accessible to the VNET and subnets created by the offer."
}
},
{
@@ -1571,7 +1674,7 @@
"constraints": {
"allowedValues": [
{
- "label": "Azure database for PostgreSQL",
+ "label": "PostgreSQL (Supports passwordless connection)",
"value": "postgresql"
},
{
@@ -1579,14 +1682,31 @@
"value": "oracle"
},
{
- "label": "Azure SQL",
+ "label": "Microsoft SQL Server (Supports passwordless connection)",
"value": "sqlserver"
+ },
+ {
+ "label": "MySQL (Supports passwordless connection)",
+ "value": "mysql"
+ },
+ {
+ "label": "Other",
+ "value": "otherdb"
}
],
"required": true
},
"visible": true
},
+ {
+ "name": "mysqlJDBCDriverInfo",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": "[and(bool(steps('section_database').enableDB),equals(steps('section_database').databaseConnectionInfo.databaseType, 'mysql'))]",
+ "options": {
+ "icon": "Info",
+ "text": "To support passwordless connection and various functionalities, the offer will upgrade the Oracle WebLogic Server MySQL driver with a recent MySQL Connector Java driver."
+ }
+ },
{
"name": "jdbcDataSourceName",
"type": "Microsoft.Common.TextBox",
@@ -1594,7 +1714,7 @@
"toolTip": "The JNDI name for the database JDBC connection",
"defaultValue": "",
"constraints": {
- "required": "[bool(steps('section_database').enableDB)]",
+ "required": true,
"regex": "^[a-zA-Z0-9./_-]{1,30}$",
"validationMessage": "The value must be 1-30 characters long and must only contain letters, numbers, hyphens (-), underscores (_), periods (.) and slashes (/)."
},
@@ -1607,12 +1727,91 @@
"toolTip": "The JDBC connection string for the database",
"defaultValue": "",
"constraints": {
- "required": "[bool(steps('section_database').enableDB)]",
- "regex": "[concat('^jdbc:', coalesce(steps('section_database').databaseConnectionInfo.databaseType, ''), '.*$')]",
+ "required": true,
+ "validations": [
+ {
+ "regex": "^jdbc:.*$",
+ "message": "A valid JDBC URL must start with 'jdbc:'."
+ },
+ {
+ "isValid": "[startsWith(steps('section_database').databaseConnectionInfo.dsConnectionURL, concat('jdbc:', steps('section_database').databaseConnectionInfo.databaseType))]",
+ "message": "A valid JDBC URL for the chosen database type must be provided."
+ },
+ {
+ "isValid": "[if(and(bool(steps('section_database').databaseConnectionInfo.enablePswlessConnection), equals(steps('section_database').databaseConnectionInfo.databaseType, 'mysql')), and(not(contains(steps('section_database').databaseConnectionInfo.dsConnectionURL, 'defaultAuthenticationPlugin')),not(contains(steps('section_database').databaseConnectionInfo.dsConnectionURL, 'authenticationPlugins')), not(contains(steps('section_database').databaseConnectionInfo.dsConnectionURL, 'azure.clientId'))), bool('true'))]",
+ "message": "The offer will append defaultAuthenticationPlugin, authenticationPlugins with Azure provided plugins, and append azure.clientId with your managed identity client ID automatically, please do not specify them in your connection string."
+ },
+ {
+ "isValid": "[if(and(bool(steps('section_database').databaseConnectionInfo.enablePswlessConnection), equals(steps('section_database').databaseConnectionInfo.databaseType, 'postgresql')), and(not(contains(steps('section_database').databaseConnectionInfo.dsConnectionURL, 'authenticationPluginClassName')),not(contains(steps('section_database').databaseConnectionInfo.dsConnectionURL, 'azure.clientId'))), bool('true'))]",
+ "message": "The offer will append authenticationPluginClassName with Azure provided plugins, and append azure.clientId with your managed identity client ID automatically, please do not specify them in your connection string."
+ },
+ {
+ "isValid": "[if(and(bool(steps('section_database').databaseConnectionInfo.enablePswlessConnection0),equals(steps('section_database').databaseConnectionInfo.databaseType, 'sqlserver')), greater(length(steps('section_database').databaseConnectionInfo.dbIdentity.userAssignedIdentities),0), bool('true'))]",
+ "message": "You must select at least one managed identity that has access to your database."
+ }
+ ]
+ },
+ "visible": "[and(bool(steps('section_database').enableDB), not(equals(steps('section_database').databaseConnectionInfo.databaseType, 'otherdb')))]"
+ },
+ {
+ "name": "dsConnectionURL2",
+ "type": "Microsoft.Common.TextBox",
+ "label": "DataSource Connection String",
+ "toolTip": "The JDBC connection string for the database",
+ "defaultValue": "",
+ "constraints": {
+ "required": true,
+ "regex": "^jdbc:.*$",
"validationMessage": "A valid JDBC URL for the chosen database type must be provided"
},
+ "visible": "[and(bool(steps('section_database').enableDB), equals(steps('section_database').databaseConnectionInfo.databaseType, 'otherdb'))]"
+ },
+ {
+ "name": "dbGlobalTranPro",
+ "type": "Microsoft.Common.DropDown",
+ "label": "Global transactions protocol",
+ "defaultValue": "OnePhaseCommit",
+ "multiLine": true,
+ "toolTip": "Determines the transaction protocol (global transaction processing behavior) for the data source.",
+ "constraints": {
+ "allowedValues": [
+ {
+ "label": "TwoPhaseCommit",
+ "description": "Standard XA transaction processing. Requires an XA driver.",
+ "value": "TwoPhaseCommit"
+ },
+ {
+ "label": "LoggingLastResource",
+ "description": "A performance enhancement for one non-XA resource.",
+ "value": "LoggingLastResource"
+ },
+ {
+ "label": "EmulateTwoPhaseCommit",
+ "description": "Enables one non-XA resource to participate in a global transaction, but has some risk to data.",
+ "value": "EmulateTwoPhaseCommit"
+ },
+ {
+ "label": "OnePhaseCommit",
+ "description": "One-phase XA transaction processing using a non-XA driver. This is the default setting.",
+ "value": "OnePhaseCommit"
+ },
+ {
+ "label": "None",
+ "description": "Support for local transactions only.",
+ "value": "None"
+ }
+ ],
+ "required": true
+ },
"visible": true
},
+ {
+ "name": "enablePswlessConnection0",
+ "type": "Microsoft.Common.CheckBox",
+ "label": "Use passwordless datasource connection",
+ "toolTip": "Use passwordless datasource connection.",
+ "visible": "[and(bool(steps('section_database').enableDB),equals(steps('section_database').databaseConnectionInfo.databaseType, 'sqlserver'))]"
+ },
{
"name": "dbUser",
"type": "Microsoft.Common.TextBox",
@@ -1620,11 +1819,26 @@
"toolTip": "Use only letters and numbers",
"defaultValue": "",
"constraints": {
- "required": "[bool(steps('section_database').enableDB)]",
- "regex": "^(?=.{1,128}$)[a-zA-Z](?!.*--)(?!.*@@)(?!.*-@)(?!.*@-)[a-zA-Z0-9-@]*[a-zA-Z0-9]$",
- "validationMessage": "The value must be 1-128 characters long and must only contain letters, numbers, hyphen(-) and the at sign, no hyphen allowed at the beginning and the end of database username."
+ "required": true,
+ "validations": [
+ {
+ "regex": "^(?=.{1,128}$)[a-zA-Z](?!.*--)(?!.*@@)(?!.*-@)(?!.*@-)[a-zA-Z0-9-@]*[a-zA-Z0-9]$",
+ "message": "The value must be 1-128 characters long and must only contain letters, numbers, hyphen(-) and the at sign, no hyphen allowed at the beginning and the end of database username."
+ },
+ {
+ "isValid": "[if(bool(steps('section_database').databaseConnectionInfo.enablePswlessConnection), greater(length(steps('section_database').databaseConnectionInfo.dbIdentity.userAssignedIdentities),0), bool('true'))]",
+ "message": "You must select at least one managed identity that has access to your database."
+ }
+ ]
},
- "visible": true
+ "visible": "[and(bool(steps('section_database').enableDB), not(and(steps('section_database').databaseConnectionInfo.enablePswlessConnection0, equals(steps('section_database').databaseConnectionInfo.databaseType, 'sqlserver'))))]"
+ },
+ {
+ "name": "enablePswlessConnection",
+ "type": "Microsoft.Common.CheckBox",
+ "label": "Use passwordless datasource connection",
+ "toolTip": "Use passwordless datasource connection.",
+ "visible": "[and(bool(steps('section_database').enableDB),or(equals(steps('section_database').databaseConnectionInfo.databaseType, 'mysql'),equals(steps('section_database').databaseConnectionInfo.databaseType, 'postgresql')))]"
},
{
"name": "dbPassword",
@@ -1635,38 +1849,284 @@
},
"toolTip": "Database Password",
"constraints": {
- "required": "[bool(steps('section_database').enableDB)]",
- "regex": "^((?=.*[0-9])(?=.*[a-zA-Z!@#$%^&*])).{5,128}$",
- "validationMessage": "The password must be between 5 and 128 characters long and have at least one number."
+ "required": true,
+ "regex": "^((?=.*[0-9])(?=.*[a-zA-Z!@#$%^&*])).{6,128}$",
+ "validationMessage": "The password must be between 6 and 128 characters long and have at least one number."
},
"options": {
"hideConfirmation": false
},
- "visible": true
+ "visible": "[and(bool(steps('section_database').enableDB), not(or(steps('section_database').databaseConnectionInfo.enablePswlessConnection, steps('section_database').databaseConnectionInfo.enablePswlessConnection0)))]"
+ },
+ {
+ "name": "dbIdentity",
+ "type": "Microsoft.ManagedIdentity.IdentitySelector",
+ "label": "Connect database with Managed Identity",
+ "toolTip": {
+ "userAssignedIdentity": "Select a user assigned identity that has access to your database. For how to create a database user for your managed identity, see https://aka.ms/javaee-db-identity."
+ },
+ "defaultValue": {
+ "systemAssignedIdentity": "Off"
+ },
+ "options": {
+ "hideSystemAssignedIdentity": true,
+ "hideUserAssignedIdentity": false
+ },
+ "visible": "[and(bool(steps('section_database').enableDB), or(steps('section_database').databaseConnectionInfo.enablePswlessConnection, steps('section_database').databaseConnectionInfo.enablePswlessConnection0))]"
+ },
+ {
+ "name": "dbDriverLibraries",
+ "type": "Microsoft.Common.FileUpload",
+ "label": "DataSource driver (.jar)",
+ "toolTip": "The datasource driver jar package for the specified database.",
+ "constraints": {
+ "required": true,
+ "accept": ".jar"
+ },
+ "options": {
+ "multiple": true,
+ "uploadMode": "url",
+ "openMode": "binary"
+ },
+ "visible": "[and(bool(steps('section_database').enableDB), equals(steps('section_database').databaseConnectionInfo.databaseType, 'otherdb'))]"
+ },
+ {
+ "name": "dbDriverInfoBox",
+ "type": "Microsoft.Common.InfoBox",
+ "options": {
+ "icon": "Info",
+ "text": "WebLogic Server provides support for application data access to any database using a JDBC-compliant driver. Select the link below for more details.",
+ "uri": "https://aka.ms/wls-aks-dbdriver"
+ },
+ "visible": "[and(bool(steps('section_database').enableDB), equals(steps('section_database').databaseConnectionInfo.databaseType, 'otherdb'))]"
+ },
+ {
+ "name": "dbDriverName",
+ "type": "Microsoft.Common.TextBox",
+ "label": "DataSource driver name",
+ "toolTip": "The driver name for the database",
+ "placeholder": "com.informix.jdbc.IfxDriver",
+ "constraints": {
+ "required": true,
+ "regex": "^[a-zA-Z_][a-zA-Z0-9_]+(\\.[a-zA-Z_][a-zA-Z0-9_]+){1,50}$",
+ "validationMessage": "A valid driver name for the chosen database type must be provided"
+ },
+ "visible": "[and(bool(steps('section_database').enableDB), equals(steps('section_database').databaseConnectionInfo.databaseType, 'otherdb'))]"
+ },
+ {
+ "name": "dbTestTableName",
+ "type": "Microsoft.Common.TextBox",
+ "label": "Test table name",
+ "toolTip": "The name of the database table to use when testing physical database connections. ",
+ "constraints": {
+ "required": true,
+ "regex": "^.*$",
+ "validationMessage": "A test table name for the chosen database type must be provided"
+ },
+ "visible": "[and(bool(steps('section_database').enableDB), equals(steps('section_database').databaseConnectionInfo.databaseType, 'otherdb'))]"
}
],
"visible": "[bool(steps('section_database').enableDB)]"
}
]
+ },
+ {
+ "name": "section_autoScaling",
+ "type": "Microsoft.Common.Section",
+ "label": "Autoscaling",
+ "subLabel": {
+ "preValidation": "Configure Horizontal Autoscaling",
+ "postValidation": "Done"
+ },
+ "bladeTitle": "Horizontal Autoscaling",
+ "elements": [
+ {
+ "name": "aboutAutoscaling",
+ "type": "Microsoft.Common.TextBlock",
+ "visible": true,
+ "options": {
+ "text": "Selecting 'Yes' here and providing the configuration will cause the offer to configure metrics to scale the WebLogic cluster."
+ }
+ },
+ {
+ "name": "enableAutoscaling",
+ "type": "Microsoft.Common.OptionsGroup",
+ "label": "Provision resources for horizontal autoscaling?",
+ "defaultValue": "No",
+ "toolTip": "Select 'Yes' and provide required info to configure horizontal autoscaling.",
+ "constraints": {
+ "allowedValues": [
+ {
+ "label": "Yes",
+ "value": "true"
+ },
+ {
+ "label": "No",
+ "value": "false"
+ }
+ ],
+ "required": true
+ }
+ },
+ {
+ "name": "autoScalingInfo",
+ "type": "Microsoft.Common.Section",
+ "label": "Horizontal autoscaling settings",
+ "elements": [
+ {
+ "name": "metricSource",
+ "type": "Microsoft.Common.OptionsGroup",
+ "label": "Select autoscaling option. ",
+ "defaultValue": "Kubernetes Metrics Server (simple autoscaling)",
+ "toolTip": "If you select Kubernetes Metrics Server, this offer configures WebLogic Server to scale based on CPU or memory utilization. If you select WebLogic Monitoring Exporter, this offer configures WebLogic Monitoring Exporter to scrape WebLogic Server metrics and feed them to Azure Monitor Managed Service for Prometheus; integrates KEDA with your AKS cluster to monitor Azure Monitor workspace and feed data to AKS. You can create KEDA scaler based on Java metrics from Azure Monitor workspace",
+ "constraints": {
+ "allowedValues": [
+ {
+ "label": "Kubernetes Metrics Server (simple autoscaling)",
+ "value": "kms"
+ },
+ {
+ "label": "WebLogic Monitoring Exporter (advanced autoscaling)",
+ "value": "wme"
+ }
+ ],
+ "required": true
+ },
+ "visible": true
+ },
+ {
+ "name": "infoKms",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": "[equals(steps('section_autoScaling').autoScalingInfo.metricSource, 'kms')]",
+ "options": {
+ "icon": "None",
+ "text": "This option configures and runs Kubernetes Horizontal Pod Autoscaler (HPA) to scale a WebLogic cluster, based on the CPU or memory utilization. The HPA autoscales WebLogic Server instances from a minimum of 1 cluster member up to the maximum number of cluster members, and the scale up or down action occurs when the average CPU/memory is consistently over the utilization threshold. The default maximum of cluster members is 5. You can set it from Basics -> Optional Basic Configuration -> Maximum dynamic cluster size. The default CPU request is 200m. The default memory request is 1.5Gi.",
+ "uri": "https://aka.ms/wlsoperator-autoscaling-hpa"
+ }
+ },
+ {
+ "name": "infoWme",
+ "type": "Microsoft.Common.InfoBox",
+ "visible": "[equals(steps('section_autoScaling').autoScalingInfo.metricSource, 'wme')]",
+ "options": {
+ "icon": "None",
+ "text": "This option installs all the software necessary to allow you to create Java metric aware KEDA scaling rules. The offer provisions the following deployments; right-click and select Open Link in New Tab to follow links. After the provisioning is completed, you can create KEDA scaling rules. A sample rule is provided in the deployment outputs. The following steps show how to see the sample rule: 1. View the resource group for this deployment in the Azure portal. 2. In the Settings section, select Deployments. 3. Select the oldest deployment; the name of the deployment looks similar to oracle.20210620-wls-on-aks. 4. Select Outputs. 5. The shellCmdtoOutputKedaScalerSample value is the base64 string of a scaler sample; copy the value and run it in your terminal. For guidance on how to complete the configuration, see Tutorial: Migrate Oracle WebLogic Server to AKS with KEDA scaler based on Prometheus Metrics.",
+ "uri": "https://aka.ms/wls-aks-keda-scaler"
+ }
+ },
+ {
+ "name": "kmsMetrics",
+ "type": "Microsoft.Common.OptionsGroup",
+ "label": "Select metric. ",
+ "defaultValue": "Average CPU Utilization",
+ "toolTip": "Select metric.",
+ "constraints": {
+ "allowedValues": [
+ {
+ "label": "Average CPU Utilization",
+ "value": "cpu"
+ },
+ {
+ "label": "Average Memory Utilization",
+ "value": "memory"
+ }
+ ],
+ "required": true
+ },
+ "visible": "[equals(steps('section_autoScaling').autoScalingInfo.metricSource, 'kms')]"
+ },
+ {
+ "name": "averageCpuUtilization",
+ "type": "Microsoft.Common.Slider",
+ "min": 10,
+ "max": 100,
+ "label": "Average CPU Utilization",
+ "subLabel": "Percent",
+ "defaultValue": 60,
+ "showStepMarkers": false,
+ "toolTip": "Pick Average CPU Utilization in Percent",
+ "constraints": {
+ "required": false
+ },
+ "visible": "[equals(steps('section_autoScaling').autoScalingInfo.kmsMetrics, 'cpu')]"
+ },
+ {
+ "name": "averageMemoryUtilization",
+ "type": "Microsoft.Common.Slider",
+ "min": 10,
+ "max": 100,
+ "label": "Average Memory Utilization",
+ "subLabel": "Percent",
+ "defaultValue": 60,
+ "showStepMarkers": false,
+ "toolTip": "Pick Average Memory Utilization in Percent",
+ "constraints": {
+ "required": false
+ },
+ "visible": "[equals(steps('section_autoScaling').autoScalingInfo.kmsMetrics, 'memory')]"
+ }
+ ],
+ "visible": "[bool(steps('section_autoScaling').enableAutoscaling)]"
+ }
+ ]
+ },
+ {
+ "name": "tags",
+ "label": "Tags",
+ "elements": [
+ {
+ "name": "tagsByResource",
+ "type": "Microsoft.Common.TagsByResource",
+ "resources": [
+ "${identifier.managedClusters}",
+ "${identifier.applicationGateways}",
+ "${identifier.registries}",
+ "${identifier.virtualMachines}",
+ "${identifier.virtualMachinesExtensions}",
+ "${identifier.virtualNetworks}",
+ "${identifier.networkInterfaces}",
+ "${identifier.networkSecurityGroups}",
+ "${identifier.publicIPAddresses}",
+ "${identifier.storageAccounts}",
+ "${identifier.vaults}",
+ "${identifier.userAssignedIdentities}",
+ "${identifier.dnszones}",
+ "${identifier.workspaces}",
+ "${identifier.accounts}",
+ "${identifier.deploymentScripts}"
+ ],
+ "toolTip": "Tags help you organize your resources and categorize them for billing or management purposes. You can apply tags to resources deployed by the offer."
+ }
+ ]
}
],
"outputs": {
- "acrName": "[last(split(steps('section_aks').acrInfo.acrSelector.id, '/'))]",
+ "acrName": "[last(split(steps('section_aks').imageInfo.oracleAcrSelector.id, '/'))]",
+ "acrResourceGroupName": "[last(take(split(steps('section_aks').imageInfo.oracleAcrSelector.id, '/'),5))]",
"aksAgentPoolNodeCount": "[steps('section_aks').clusterInfo.aksNodeCount]",
+ "aksAgentPoolNodeMaxCount": "[steps('section_aks').clusterInfo.aksNodeMaxCount]",
"aksClusterName": "[last(split(steps('section_aks').clusterInfo.aksClusterSelector.id, '/'))]",
"aksClusterRGName": "[last(take(split(steps('section_aks').clusterInfo.aksClusterSelector.id, '/'), 5))]",
"appGatewayCertificateOption": "[steps('section_appGateway').appgwIngress.certificateOption]",
- "appGatewaySSLBackendRootCertData": "[steps('section_appGateway').appgwIngress.keyVaultBackendSSLCertData]",
- "appGatewaySSLCertData": "[steps('section_appGateway').appgwIngress.keyVaultSSLCertData]",
+ "appGatewaySSLBackendRootCertData": "[steps('section_appGateway').appgwIngress.uploadedSSLCertData]",
+ "appGatewaySSLCertData": "[steps('section_appGateway').appgwIngress.appGatewaySSLCertData]",
"appGatewaySSLCertPassword": "[steps('section_appGateway').appgwIngress.appGatewaySSLCertPassword]",
"appgwForAdminServer": "[steps('section_appGateway').appgwIngress.appgwForAdminServer]",
"appgwForRemoteConsole": "[steps('section_appGateway').appgwIngress.appgwForAdminRemote]",
+ "appgwUsePrivateIP": "[steps('section_appGateway').appgwIngress.appgwUsePrivateIP]",
"appPackageUrls": "[steps('section_aks').jeeAppInfo.appPackageUrl]",
"appReplicas": "[int(steps('section_aks').jeeAppInfo.appReplicas)]",
- "createACR": "[bool(steps('section_aks').acrInfo.createACR)]",
+ "averageCpuUtilization": "[steps('section_autoScaling').autoScalingInfo.averageCpuUtilization]",
+ "averageMemoryUtilization": "[steps('section_autoScaling').autoScalingInfo.averageMemoryUtilization]",
+ "createACR": "[bool(steps('section_aks').imageInfo.oracleCreateACR)]",
"createAKSCluster": "[bool(steps('section_aks').clusterInfo.createAKSCluster)]",
"createDNSZone": "[not(bool(steps('section_dnsConfiguration').bringDNSZone))]",
+ "dbDriverLibrariesUrls": "[steps('section_database').databaseConnectionInfo.dbDriverLibraries]",
+ "dbDriverName": "[steps('section_database').databaseConnectionInfo.dbDriverName]",
+ "dbGlobalTranPro": "[steps('section_database').databaseConnectionInfo.dbGlobalTranPro]",
+ "dbIdentity": "[steps('section_database').databaseConnectionInfo.dbIdentity]",
"dbPassword": "[steps('section_database').databaseConnectionInfo.dbPassword]",
+ "dbTestTableName": "[steps('section_database').databaseConnectionInfo.dbTestTableName]",
"dbUser": "[steps('section_database').databaseConnectionInfo.dbUser]",
"databaseType": "[steps('section_database').databaseConnectionInfo.databaseType]",
"dnszoneAdminConsoleLabel": "[steps('section_dnsConfiguration').dnszoneAdminConsoleLabel]",
@@ -1675,40 +2135,27 @@
"dnszoneClusterT3ChannelLabel": "[steps('section_dnsConfiguration').dnszoneClusterT3ChannelLabel]",
"dnszoneName": "[steps('section_dnsConfiguration').dnszoneName]",
"dnszoneRGName": "[steps('section_dnsConfiguration').dnsZoneResourceGroup]",
- "dsConnectionURL": "[steps('section_database').databaseConnectionInfo.dsConnectionURL]",
- "enableAppGWIngress": "[steps('section_appGateway').appgwIngress.enableAppGateway]",
- "enableAzureMonitoring": "[bool(steps('section_aks').clusterInfo.enableAzureMonitoring)]",
- "enableAzureFileShare": "[bool(steps('section_aks').clusterInfo.enableAzureFileShare)]",
- "enableCookieBasedAffinity": "[bool(steps('section_appGateway').appgwIngress.enableCookieBasedAffinity)]",
+ "dsConnectionURL": "[coalesce(steps('section_database').databaseConnectionInfo.dsConnectionURL, steps('section_database').databaseConnectionInfo.dsConnectionURL2, 'null')]",
+ "enableAppGWIngress": "[equals(steps('section_appGateway').loadBalancingOptions, 'agic')]",
+ "enableAutoscaling": "[bool(steps('section_autoScaling').enableAutoscaling)]",
+ "enableAzureMonitoring": "[bool(steps('section_aks').aksAdvancedInfo.enableAzureMonitoring)]",
+ "enableAzureFileShare": "[bool(steps('section_aks').aksAdvancedInfo.enableAzureFileShare)]",
+ "enableCookieBasedAffinity": "[not(bool(steps('section_appGateway').appgwIngress.enableCookieBasedAffinity))]",
"enableCustomSSL": "[bool(steps('section_sslConfiguration').enableCustomSSL)]",
"enableDB": "[bool(steps('section_database').enableDB)]",
"enableDNSConfiguration": "[bool(steps('section_dnsConfiguration').enableDNSConfiguration)]",
"enableAdminT3Tunneling": "[basics('basicsOptional').enableAdminT3Tunneling]",
"enableClusterT3Tunneling": "[basics('basicsOptional').enableClusterT3Tunneling]",
- "identity": "[basics('basicsRequired').identity]",
+ "enablePswlessConnection": "[or(bool(steps('section_database').databaseConnectionInfo.enablePswlessConnection), bool(steps('section_database').databaseConnectionInfo.enablePswlessConnection0))]",
"jdbcDataSourceName": "[steps('section_database').databaseConnectionInfo.jdbcDataSourceName]",
- "lbSvcValues": "[steps('section_appGateway').lbSVCInfo.lbSVC]",
+ "isSSOSupportEntitled": "[bool(steps('section_aks').imageInfo.isSSOSupportEntitled)]",
+ "lbSvcValues": "[if(not(empty(first(if(empty(steps('section_appGateway').lbSVCInfo.lbSVC),parse('[{\"colName\":\"\"}]'), steps('section_appGateway').lbSVCInfo.lbSVC)).colName)),steps('section_appGateway').lbSVCInfo.lbSVC, parse('[]'))]",
"location": "[location()]",
- "keyVaultName": "[steps('section_appGateway').appgwIngress.keyVaultName]",
- "keyVaultResourceGroup": "[steps('section_appGateway').appgwIngress.keyVaultResourceGroup]",
- "keyVaultSSLBackendRootCertDataSecretName": "[steps('section_appGateway').appgwIngress.keyVaultBackendSSLCertDataSecretName]",
- "keyVaultSSLCertDataSecretName": "[steps('section_appGateway').appgwIngress.keyVaultSSLCertDataSecretName]",
- "keyVaultSSLCertPasswordSecretName": "[steps('section_appGateway').appgwIngress.keyVaultSSLCertPasswordSecretName]",
+ "hpaScaleType": "[steps('section_autoScaling').autoScalingInfo.kmsMetrics]",
"managedServerPrefix": "[basics('basicsOptional').managedServerPrefix]",
- "ocrSSOPSW": "[basics('basicsRequired').ocrSSOPassword]",
- "ocrSSOUser": "[basics('basicsRequired').ocrSSOUserName]",
- "servicePrincipal": "[steps('section_appGateway').appgwIngress.servicePrincipal]",
- "sslConfigurationAccessOption": "[steps('section_sslConfiguration').sslConfigurationAccessOption]",
- "sslKeyVaultCustomIdentityKeyStoreDataSecretName": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultCustomIdentityKeyStoreDataSecretName]",
- "sslKeyVaultCustomIdentityKeyStorePassPhraseSecretName": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultCustomIdentityKeyStorePassPhraseSecretName]",
- "sslKeyVaultCustomIdentityKeyStoreType": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultCustomIdentityKeyStoreType]",
- "sslKeyVaultCustomTrustKeyStoreDataSecretName": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultCustomTrustKeyStoreDataSecretName]",
- "sslKeyVaultCustomTrustKeyStorePassPhraseSecretName": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultCustomTrustKeyStorePassPhraseSecretName]",
- "sslKeyVaultCustomTrustKeyStoreType": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultCustomTrustKeyStoreType]",
- "sslKeyVaultName": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultName]",
- "sslKeyVaultPrivateKeyAliasSecretName": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultPrivateKeyAliasSecretName]",
- "sslKeyVaultPrivateKeyPassPhraseSecretName": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultPrivateKeyPassPhraseSecretName]",
- "sslKeyVaultResourceGroup": "[steps('section_sslConfiguration').keyVaultStoredCustomSSLSettings.keyVaultResourceGroup]",
+ "newOrExistingVnetForApplicationGateway": "[steps('section_appGateway').appgwIngress.vnetForApplicationGateway.newOrExisting]",
+ "ocrSSOPSW": "[steps('section_aks').imageInfo.ocrSSOPassword]",
+ "ocrSSOUser": "[steps('section_aks').imageInfo.ocrSSOUserName]",
"sslUploadedCustomIdentityKeyStoreData": "[steps('section_sslConfiguration').uploadedCustomSSLSettings.uploadedCustomIdentityKeyStoreData]",
"sslUploadedCustomIdentityKeyStorePassphrase": "[steps('section_sslConfiguration').uploadedCustomSSLSettings.uploadedCustomIdentityKeyStorePassphrase]",
"sslUploadedCustomIdentityKeyStoreType": "[steps('section_sslConfiguration').uploadedCustomSSLSettings.uploadedCustomIdentityKeyStoreType]",
@@ -1717,15 +2164,25 @@
"sslUploadedCustomTrustKeyStoreType": "[steps('section_sslConfiguration').uploadedCustomSSLSettings.uploadedCustomTrustKeyStoreType]",
"sslUploadedPrivateKeyAlias": "[steps('section_sslConfiguration').uploadedCustomSSLSettings.uploadedPrivateKeyAlias]",
"sslUploadedPrivateKeyPassPhrase": "[steps('section_sslConfiguration').uploadedCustomSSLSettings.uploadedPrivateKeyPassPhrase]",
+ "useHpa": "[if(equals(steps('section_autoScaling').autoScalingInfo.metricSource, 'kms'), true, false)]",
"useInternalLB": "[bool(steps('section_appGateway').lbSVCInfo.enableInternalLB)]",
+ "useOracleImage": "[if(bool(steps('section_aks').aksAdvancedInfo.useAcrImage), false, true)]",
+ "userProvidedAcr": "[last(split(steps('section_aks').aksAdvancedInfo.userProvidedAcrSelector.id, '/'))]",
+ "userProvidedAcrRgName": "[last(take(split(steps('section_aks').aksAdvancedInfo.userProvidedAcrSelector.id, '/'),5))]",
+ "userProvidedImagePath": "[steps('section_aks').aksAdvancedInfo.userProvidedImagePath]",
+ "validateApplications": true,
+ "vnetForApplicationGateway": "[steps('section_appGateway').appgwIngress.vnetForApplicationGateway]",
+ "vnetRGNameForApplicationGateway": "[steps('section_appGateway').appgwIngress.vnetForApplicationGateway.resourceGroup]",
+ "vmSize": "[steps('section_aks').clusterInfo.nodeVMSizeSelector]",
"wdtRuntimePassword": "[basics('basicsRequired').wdtRuntimePassword]",
"wlsClusterSize": "[basics('basicsOptional').wlsClusterSize]",
"wlsDomainName": "[basics('basicsOptional').wlsDomainName]",
"wlsDomainUID": "[basics('basicsOptional').wlsDomainUID]",
- "wlsImageTag": "[steps('section_aks').imageInfo.fromImage]",
+ "wlsImageTag": "[if(equals(steps('section_aks').imageInfo.oracleImageSelector, 'others'), steps('section_aks').imageInfo.fromOracleImage, steps('section_aks').imageInfo.oracleImageSelector)]",
"wlsJavaOption": "[basics('basicsOptional').wlsJavaOption]",
"wlsPassword": "[basics('basicsRequired').wlsPassword]",
- "wlsUserName": "[basics('basicsRequired').wlsUserName]"
+ "wlsUserName": "[basics('basicsRequired').wlsUserName]",
+ "tagsByResource": "[steps('tags').tagsByResource]"
}
}
}
diff --git a/weblogic-azure-aks/src/main/arm/scripts/appgw-helm-config.yaml.template b/weblogic-azure-aks/src/main/arm/scripts/appgw-helm-config.yaml.template
deleted file mode 100644
index 3f9dd7537..000000000
--- a/weblogic-azure-aks/src/main/arm/scripts/appgw-helm-config.yaml.template
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
-# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl.
-#
-# Based on https://raw.githubusercontent.com/Azure/application-gateway-kubernetes-ingress/master/docs/examples/sample-helm-config.yaml
-
-# This file contains the essential configs for the ingress controller Helm chart
-
-# Verbosity level of the App Gateway Ingress Controller
-verbosityLevel: 3
-
-################################################################################
-# Specify which application gateway the ingress controller will manage
-#
-appgw:
- subscriptionId: @SUB_ID@
- resourceGroup: @APPGW_RG_NAME@
- name: @APPGW_NAME@
- usePrivateIP: false
-
- # Setting appgw.shared to "true" will create an AzureIngressProhibitedTarget CRD.
- # This prohibits AGIC from applying config for any host/path.
- # Use "kubectl get AzureIngressProhibitedTargets" to view and change this.
- shared: false
-
-################################################################################
-# Specify which Kubernetes namespace the ingress controller will watch
-# Default value is "default"
-# Leaving this variable out or setting it to blank or empty string would
-# result in ingress controller observing all acessible namespaces.
-#
-kubernetes:
- watchNamespace: @WATCH_NAMESPACE@
-
-################################################################################
-# Specify the authentication with Azure Resource Manager
-#
-# Two authentication methods are available:
-# - Option 1: AAD-Pod-Identity (https://github.com/Azure/aad-pod-identity)
-# armAuth:
-# type: aadPodIdentity
-# identityResourceID:
-# identityClientID:
-
-armAuth:
- type: servicePrincipal
- secretJSON: @SP_ENCODING_CREDENTIALS@
-
-################################################################################
-# Specify if the cluster is RBAC enabled or not
-rbac:
- # Specifies whether RBAC resources should be created
- create: true
diff --git a/weblogic-azure-aks/src/main/arm/scripts/appgw-ingress-clusterAdmin-roleBinding.yaml b/weblogic-azure-aks/src/main/arm/scripts/appgw-ingress-clusterAdmin-roleBinding.yaml
deleted file mode 100644
index 6bf39ce8e..000000000
--- a/weblogic-azure-aks/src/main/arm/scripts/appgw-ingress-clusterAdmin-roleBinding.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
-# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: ingress-azure-admin
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cluster-admin
-subjects:
-- kind: ServiceAccount
- name: ingress-azure
- namespace: default
\ No newline at end of file
diff --git a/weblogic-azure-aks/src/main/arm/scripts/buildWLSDockerImage.sh b/weblogic-azure-aks/src/main/arm/scripts/buildWLSDockerImage.sh
index 0694ebe0d..0a4b7e58c 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/buildWLSDockerImage.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/buildWLSDockerImage.sh
@@ -8,14 +8,14 @@ function echo_stderr() {
echo "$@" >&2
}
-# read and from stdin
+# read azureACRShibboleth and ocrSSOShibboleth from stdin
function read_sensitive_parameters_from_stdin() {
- read azureACRPassword ocrSSOPSW
+ read azureACRShibboleth ocrSSOShibboleth
}
#Function to display usage message
function usage() {
- echo " ./buildWLSDockerImage.sh ./buildWLSDockerImage.sh "
+ echo " | ./buildWLSDockerImage.sh "
if [ $1 -eq 1 ]; then
exit 1
fi
@@ -31,6 +31,11 @@ function validate_status() {
}
function validate_inputs() {
+ if [ -z "$useOracleImage" ]; then
+ echo_stderr "userProvidedImagePath is required. "
+ usage 1
+ fi
+
if [ -z "$wlsImagePath" ]; then
echo_stderr "wlsImagePath is required. "
usage 1
@@ -46,8 +51,8 @@ function validate_inputs() {
usage 1
fi
- if [ -z "$azureACRPassword" ]; then
- echo_stderr "azureACRPassword is required. "
+ if [ -z "$azureACRShibboleth" ]; then
+ echo_stderr "azureACRShibboleth is required. "
usage 1
fi
@@ -61,13 +66,13 @@ function validate_inputs() {
usage 1
fi
- if [ -z "$ocrSSOUser" ]; then
+ if [[ "${useOracleImage,,}" == "${constTrue}" ]] && [ -z "$ocrSSOUser" ]; then
echo_stderr "ocrSSOUser is required. "
usage 1
fi
- if [ -z "$ocrSSOPSW" ]; then
- echo_stderr "ocrSSOPSW is required. "
+ if [[ "${useOracleImage,,}" == "${constTrue}" ]] && [ -z "$ocrSSOShibboleth" ]; then
+ echo_stderr "ocrSSOShibboleth is required. "
usage 1
fi
@@ -90,6 +95,14 @@ function validate_inputs() {
echo_stderr "enableClusterT3Tunneling is required. "
usage 1
fi
+
+ if [ -z "${dbDriversUrls}" ]; then
+ echo_stderr "dbDriversUrls is required. "
+ usage 1
+ fi
+
+ appPackageUrls=$(echo $appPackageUrls | base64 -d)
+ dbDriversUrls=$(echo $dbDriversUrls | base64 -d)
}
function initialize() {
@@ -104,19 +117,90 @@ function initialize() {
mkdir wlsdeploy
mkdir wlsdeploy/config
mkdir wlsdeploy/applications
- mkdir wlsdeploy/domainLibraries
+ mkdir wlsdeploy/classpathLibraries
+ mkdir wlsdeploy/${externalJDBCLibrariesDirectoryName}
}
-# Install docker, zip, unzip and java
-# Download WebLogic Tools
-function install_utilities() {
- # Install docker
+function download_wdt_wit() {
+ local wlsToolingFamilyJsonFile=weblogic_tooling_family.json
+ # download the json file that has wls operator version from weblogic-azure repo.
+ curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fsL "${gitUrl4WLSToolingFamilyJsonFile}" -o ${wlsToolingFamilyJsonFile}
+ if [ $? -eq 0 ]; then
+ wdtDownloadURL=$(cat ${wlsToolingFamilyJsonFile} | jq ".items[] | select(.key==\"WDT\") | .downloadURL" | tr -d "\"")
+ echo "WDT URL: ${wdtDownloadURL}"
+ witDownloadURL=$(cat ${wlsToolingFamilyJsonFile} | jq ".items[] | select(.key==\"WIT\") | .downloadURL" | tr -d "\"")
+ echo "WIT URL: ${witDownloadURL}"
+ else
+ echo "Use latest WDT and WIT."
+ wdtDownloadURL="https://github.com/oracle/weblogic-deploy-tooling/releases/latest/download/weblogic-deploy.zip"
+ witDownloadURL="https://github.com/oracle/weblogic-image-tool/releases/latest/download/imagetool.zip"
+ fi
+
+ # Download weblogic tools
+ curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fsL ${wdtDownloadURL} -o weblogic-deploy.zip
+ validate_status "Check status of weblogic-deploy.zip."
+
+ curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fsL ${witDownloadURL} -o imagetool.zip
+ validate_status "Check status of imagetool.zip."
+}
+
+function download_azure_identity_extensions() {
+ local myPom=pom.xml
+ curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fsL "${gitUrl4AzureIdentityExtensionsPomFile}" -o ${myPom}
+ validate_status "Check status of downloading Azure Identity Provider JDBC MySQL Pom file."
+
+ echo "download dependencies"
+ mvn dependency:copy-dependencies -f ${myPom}
+ if [ $? -eq 0 ]; then
+ ls -l target/dependency/
+
+ mkdir wlsdeploy/classpathLibraries/azureLibraries
+ mkdir wlsdeploy/classpathLibraries/jackson
+ # fix JARs conflict issue in GA images, put jackson libraries to PRE_CLASSPATH to upgrade the existing libs.
+ mv target/dependency/jackson-annotations-*.jar wlsdeploy/classpathLibraries/jackson/
+ mv target/dependency/jackson-core-*.jar wlsdeploy/classpathLibraries/jackson/
+ mv target/dependency/jackson-databind-*.jar wlsdeploy/classpathLibraries/jackson/
+ mv target/dependency/jackson-dataformat-xml-*.jar wlsdeploy/classpathLibraries/jackson/
+ # Those jars will be appended to CLASSPATH
+ mv target/dependency/*.jar wlsdeploy/classpathLibraries/azureLibraries/
+ else
+ echo "Failed to download dependencies for azure-identity-extensions"
+ exit 1
+ fi
+}
+
+function download_mysql_driver() {
+ local myPom=mysqlpom.xml
+ curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fsL "${gitUrl4MySQLDriverPomFile}" -o ${myPom}
+ validate_status "Check status of downloading MySQL driver Pom file."
+
+ echo "download dependencies"
+ mvn dependency:copy-dependencies -f ${myPom}
+ if [ $? -eq 0 ]; then
+ ls -l target/dependency/
+
+ mkdir wlsdeploy/${constPreclassDirectoryName}
+ mv target/dependency/*.jar wlsdeploy/${constPreclassDirectoryName}/
+ else
+ echo "Failed to download dependencies for mysql driver."
+ exit 1
+ fi
+}
+
+function install_docker_multi_arch(){
+ # Install docker https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository
+ # Add Docker's official GPG key:
sudo apt-get -q update
- sudo apt-get -y -q install apt-transport-https
- curl -m ${curlMaxTime} -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+ sudo apt-get -y -q install ca-certificates curl
+ sudo install -m 0755 -d /etc/apt/keyrings
+ sudo curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+ sudo chmod a+r /etc/apt/keyrings/docker.asc
+
+ # Add the repository to Apt sources:
echo \
- "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
- $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get -q update
sudo apt-get -y -q install docker-ce docker-ce-cli containerd.io
@@ -124,22 +208,57 @@ function install_utilities() {
sudo docker --version
validate_status "Check status of docker."
sudo systemctl start docker
+}
+function install_openjdk11_x64(){
# Install Microsoft OpenJDK
- wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
+ # Valid values are only '18.04', '20.04', and '22.04'
+ ubuntu_release=`lsb_release -rs`
+ wget https://packages.microsoft.com/config/ubuntu/${ubuntu_release}/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
sudo dpkg -i packages-microsoft-prod.deb
- sudo apt -q update
- sudo apt -y -q install msopenjdk-11
+
+ sudo apt-get -y -q install apt-transport-https
+ sudo apt-get -q update
+ sudo apt-get -y -q install msopenjdk-11
echo "java version"
java -version
- validate_status "Check status of Zulu JDK 8."
+ validate_status "Check status of OpenJDK 11."
+
+
+ export JAVA_HOME=/usr/lib/jvm/msopenjdk-11-$(dpkg --print-architecture)
+ if [ ! -d "${JAVA_HOME}" ]; then
+ echo "Java home ${JAVA_HOME} does not exist."
+ exit 1
+ fi
+}
+
+function install_openjdk11_arm64(){
+ local zipFileName="microsoft-jdk-11.tar.gz"
+ sudo curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fsSL ${jdkArm64Url} -o ${zipFileName}
+ sudo mkdir -p /usr/lib/jvm
+ local dirName=$(sudo tar -xzvf ${zipFileName} | head -1 | cut -f1 -d"/")
+ sudo tar -xzvf ${zipFileName}
+ sudo mv ${dirName} msopenjdk-11-amd64
+ sudo mv -f msopenjdk-11-amd64 /usr/lib/jvm/
export JAVA_HOME=/usr/lib/jvm/msopenjdk-11-amd64
if [ ! -d "${JAVA_HOME}" ]; then
echo "Java home ${JAVA_HOME} does not exist"
exit 1
fi
+}
+
+# Install docker, zip, unzip and java
+# Download WebLogic Tools
+function install_utilities() {
+ install_docker_multi_arch
+
+ if [[ "$(dpkg --print-architecture)" == "arm64" ]]; then
+ install_openjdk11_arm64
+ else
+ install_openjdk11_x64
+ fi
sudo apt -y -q install zip
zip --help
@@ -150,26 +269,82 @@ function install_utilities() {
unzip --help
validate_status "Check status of unzip."
- # Download weblogic tools
- curl -m ${curlMaxTime} -fL ${wdtDownloadURL} -o weblogic-deploy.zip
- validate_status "Check status of weblogic-deploy.zip."
+ sudo apt-get -y -q install jq
+ echo "jq version"
+ jq --help
+ validate_status "Check status of unzip."
- curl -m ${curlMaxTime} -fL ${witDownloadURL} -o imagetool.zip
- validate_status "Check status of imagetool.zip."
+ sudo apt -y -q install maven
+ mvn --help
+ validate_status "Check status of mvn."
- curl -m ${curlMaxTime} -fL ${wlsPostgresqlDriverUrl} -o ${scriptDir}/model-images/wlsdeploy/domainLibraries/postgresql-42.2.8.jar
- validate_status "Install postgresql driver."
+ download_wdt_wit
- curl -m ${curlMaxTime} -fL ${wlsMSSQLDriverUrl} -o ${scriptDir}/model-images/wlsdeploy/domainLibraries/mssql-jdbc-7.4.1.jre8.jar
- validate_status "Install mssql driver."
+ if [[ "${dbType}" == "postgresql" ]]; then
+ curl -m ${curlMaxTime} \
+ --retry ${retryMaxAttempt} \
+ -fL ${wlsPostgresqlDriverUrl} \
+ -o ${scriptDir}/model-images/wlsdeploy/${externalJDBCLibrariesDirectoryName}/${constPostgreDriverName}
+ validate_status "Install postgresql driver."
+ fi
+
+ if [[ "${dbType}" == "sqlserver" ]]; then
+ curl -m ${curlMaxTime} \
+ --retry ${retryMaxAttempt} \
+ -fL ${wlsMSSQLDriverUrl} \
+ -o ${scriptDir}/model-images/wlsdeploy/${externalJDBCLibrariesDirectoryName}/${constMSSQLDriverName}
+ validate_status "Install mssql driver."
+ fi
+
+ if [[ "${dbType}" == "mysql" ]]; then
+ download_mysql_driver
+ fi
+
+ # For sqlserver, there is no need to install the azure identity extensions.
+ if [[ "${enablePswlessConnection,,}" == "true" ]] && [[ "${dbType}" == "mysql" || "${dbType}" == "postgresql" ]]; then
+ download_azure_identity_extensions
+ fi
+}
+
+function install_db_drivers() {
+ if [ "${dbDriversUrls}" == "[]" ] || [ -z "${dbDriversUrls}" ]; then
+ return
+ fi
+
+ local dbDriversUrls=$(echo "${dbDriversUrls:1:${#dbDriversUrls}-2}")
+ local dbDriversUrlsArray=$(echo $dbDriversUrls | tr "," "\n")
+
+ for item in $dbDriversUrlsArray; do
+ echo ${item}
+ # e.g. https://wlsaksapp.blob.core.windows.net/japps/mariadb-java-client-2.7.4.jar?sp=r&se=2021-04-29T15:12:38Z&sv=2020-02-10&sr=b&sig=7grL4qP%2BcJ%2BLfDJgHXiDeQ2ZvlWosRLRQ1ciLk0Kl7M%3D
+ local urlWithoutQueryString="${item%\?*}"
+ echo $urlWithoutQueryString
+ local fileName="${urlWithoutQueryString##*/}"
+ echo $fileName
+
+ curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fL "$item" -o ${scriptDir}/model-images/wlsdeploy/${externalJDBCLibrariesDirectoryName}/${fileName}
+ if [ $? -ne 0 ];then
+ echo "Failed to download $item"
+ exit 1
+ fi
+ done
}
# Login in OCR
# Pull weblogic image
function get_wls_image_from_ocr() {
sudo docker logout
- sudo docker login ${ocrLoginServer} -u ${ocrSSOUser} -p ${ocrSSOPSW}
- echo "Start to pull image ${wlsImagePath}"
+ sudo docker login ${ocrLoginServer} -u ${ocrSSOUser} -p ${ocrSSOShibboleth}
+ echo "Start to pull oracle image ${wlsImagePath} ${ocrLoginServer} ${ocrSSOUser} ${ocrSSOShibboleth}"
+ sudo docker pull -q ${wlsImagePath}
+ validate_status "Finish pulling image from OCR."
+}
+
+# Get user provided image
+function get_user_provided_wls_image_from_acr() {
+ sudo docker logout
+ sudo docker login ${azureACRServer} -u ${azureACRUserName} -p ${azureACRShibboleth}
+ echo "Start to pull user provided image ${wlsImagePath} ${azureACRServer} ${azureACRUserName} ${azureACRShibboleth}"
sudo docker pull -q ${wlsImagePath}
validate_status "Finish pulling image from OCR."
}
@@ -208,6 +383,30 @@ function build_wls_image() {
# Zip wls model and applications
zip -r ${scriptDir}/model-images/archive.zip wlsdeploy
+ # inspect user/group of the base image
+ local imageInfo=$(./imagetool/bin/imagetool.sh inspect --image ${wlsImagePath})
+ # {
+ # "os" : {
+ # "id" : "ol",
+ # "name" : "Oracle Linux Server",
+ # "version" : "7.9"
+ # },
+ # "javaHome" : "/u01/jdk",
+ # "javaVersion" : "1.8.0_271",
+ # "oracleHome" : "/u01/oracle",
+ # "oracleHomeGroup" : "oracle",
+ # "oracleHomeUser" : "oracle",
+ # "oracleInstalledProducts" : "WLS,COH,TOPLINK",
+ # "packageManager" : "YUM",
+ # "wlsVersion" : "12.2.1.4.0"
+ # }
+ echo ${imageInfo}
+ local user=${imageInfo#*oracleHomeUser}
+ local user=$(echo ${user%%\,*} | tr -d "\"\:\ ")
+ local group=${imageInfo#*oracleHomeGroup}
+ local group=$(echo ${group%%\,*} | tr -d "\"\:\ ")
+ echo "use ${user}:${group} to update the image"
+
# Build image
echo "Start building WLS image."
./imagetool/bin/imagetool.sh update \
@@ -218,8 +417,8 @@ function build_wls_image() {
--wdtArchive ${scriptDir}/model-images/archive.zip \
--wdtModelOnly \
--wdtDomainType WLS \
- --chown oracle:root
- # --additionalBuildCommands ${scriptDir}/nodemanager.dockerfile
+ --platform ${cpuPlatform} \
+ --chown ${user}:${group}
validate_status "Check status of building WLS domain image."
@@ -227,7 +426,7 @@ function build_wls_image() {
# Push image to ACR
sudo docker logout
- sudo docker login $azureACRServer -u ${azureACRUserName} -p ${azureACRPassword}
+ sudo docker login $azureACRServer -u ${azureACRUserName} -p ${azureACRShibboleth}
echo "Start pushing image ${acrImagePath} to $azureACRServer."
sudo docker push -q ${acrImagePath}
validate_status "Check status of pushing WLS domain image."
@@ -250,13 +449,13 @@ export wlsClusterSize=$7
export enableSSL=$8
export enableAdminT3Tunneling=$9
export enableClusterT3Tunneling=${10}
+export useOracleImage=${11}
+export dbDriversUrls=${12}
+export enablePswlessConnection=${13}
+export dbType=${14}
+export cpuPlatform=${15}
export acrImagePath="$azureACRServer/aks-wls-images:${imageTag}"
-export ocrLoginServer="container-registry.oracle.com"
-export wdtDownloadURL="https://github.com/oracle/weblogic-deploy-tooling/releases/download/release-1.9.17/weblogic-deploy.zip"
-export witDownloadURL="https://github.com/oracle/weblogic-image-tool/releases/download/release-1.9.16/imagetool.zip"
-export wlsPostgresqlDriverUrl="https://jdbc.postgresql.org/download/postgresql-42.2.8.jar"
-export wlsMSSQLDriverUrl="https://repo.maven.apache.org/maven2/com/microsoft/sqlserver/mssql-jdbc/7.4.1.jre8/mssql-jdbc-7.4.1.jre8.jar"
read_sensitive_parameters_from_stdin
@@ -266,7 +465,13 @@ initialize
install_utilities
-get_wls_image_from_ocr
+install_db_drivers
+
+if [[ "${useOracleImage,,}" == "${constTrue}" ]]; then
+ get_wls_image_from_ocr
+else
+ get_user_provided_wls_image_from_acr
+fi
prepare_wls_models
diff --git a/weblogic-azure-aks/src/main/arm/scripts/checkApplicationStatus.py b/weblogic-azure-aks/src/main/arm/scripts/checkApplicationStatus.py
new file mode 100644
index 000000000..0936b1ffd
--- /dev/null
+++ b/weblogic-azure-aks/src/main/arm/scripts/checkApplicationStatus.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+import sys
+
+def usage():
+ print(sys.argv[0] + '-user -password -t3ChannelAddress -t3ChannelPort ')
+
+if len(sys.argv) < 4:
+ usage()
+ sys.exit(0)
+
+#domainUser defaults to 'weblogic'. You can change it to another name of your choice via the command line parameter -user.
+domainUser = 'weblogic'
+#domainPassword will be passed by Command line parameter -password.
+domainPassword = None
+t3ChannelPort = None
+t3ChannelAddress = None
+
+i = 1
+while i < len(sys.argv):
+ if sys.argv[i] == '-user':
+ domainUser = sys.argv[i + 1]
+ i += 2
+ elif sys.argv[i] == '-password':
+ domainPassword = sys.argv[i + 1]
+ i += 2
+ elif sys.argv[i] == '-t3ChannelAddress':
+ t3ChannelAddress = sys.argv[i + 1]
+ i += 2
+ elif sys.argv[i] == '-t3ChannelPort':
+ t3ChannelPort = sys.argv[i + 1]
+ i += 2
+ else:
+ print('Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]))
+ usage()
+ sys.exit(1)
+
+t3ConnectionUri='t3://'+t3ChannelAddress+':'+t3ChannelPort
+connect(domainUser, domainPassword, t3ConnectionUri)
+myapps=cmo.getAppDeployments()
+inactiveApp=0
+for app in myapps:
+ bean=getMBean('/AppDeployments/'+app.getName()+'/Targets/')
+ targetsbean=bean.getTargets()
+ for target in targetsbean:
+ domainRuntime()
+ cd('AppRuntimeStateRuntime/AppRuntimeStateRuntime')
+ appstatus=cmo.getCurrentState(app.getName(),target.getName())
+ if appstatus != 'STATE_ACTIVE':
+ inactiveApp=inactiveApp+1
+ serverConfig()
+
+# TIGHT COUPLING: this exact print text is expected to indicate a successful return.
+if inactiveApp == 0:
+ print("Summary: all applications are active!")
+else:
+ print("Summary: number of inactive application: " + str(inactiveApp) + '.')
diff --git a/weblogic-azure-aks/src/main/arm/scripts/common.sh b/weblogic-azure-aks/src/main/arm/scripts/common.sh
index dcf214f3d..43b74eae4 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/common.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/common.sh
@@ -1,20 +1,69 @@
+# Copyright (c) 2021, 2024, Oracle Corporation and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+# This script runs on the Azure Container Instance (Alpine Linux) that the Azure Deployment Script creates.
+
export checkPodStatusInterval=20 # interval of checking pod status.
-export checkPodStatusMaxAttemps=30 # max attempt to check pod status.
+export checkPodStatusMaxAttemps=200 # max attempt to check pod status.
export checkPVStateInterval=5 # interval of checking pvc status.
export checkPVStateMaxAttempt=10 # max attempt to check pvc status.
-export checkSVCStateMaxAttempt=10
+export checkSVCStateMaxAttempt=50
export checkSVCInterval=30 #seconds
+export checkAGICStatusMaxAttempt=10
+export checkAGICStatusInterval=30
+export checkIngressStateMaxAttempt=50
+export checkAcrInterval=30
+export checkAcrMaxAttempt=10
+export checkAgicInterval=30
+export checkAgicMaxAttempt=50
+export checkKedaInteval=30
+export checkKedaMaxAttempt=20
export constAdminT3AddressEnvName="T3_TUNNELING_ADMIN_ADDRESS"
export constAdminServerName='admin-server'
export constClusterName='cluster-1'
export constClusterT3AddressEnvName="T3_TUNNELING_CLUSTER_ADDRESS"
+export constARM64Platform="arm64"
+export constX86Platform="amd64"
+export constMultiArchPlatform="Multi-architecture"
+export constDBTypeMySQL="mysql"
+export constDBTypeSqlServer="sqlserver"
+export constDefaultJavaOptions="-Dlog4j2.formatMsgNoLookups=true -Dweblogic.StdoutDebugEnabled=false" # the java options will be applied to the cluster
+export constDefaultJVMArgs="-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx512m -XX:MinRAMPercentage=25.0 -XX:MaxRAMPercentage=50.0 " # the JVM options will be applied to the cluster
+export constDefaultAKSVersion="default"
+export externalJDBCLibrariesDirectoryName="externalJDBCLibraries"
export constFalse="false"
export constTrue="true"
+export constIntrospectorJobActiveDeadlineSeconds=300 # for Guaranteed Qos
+export constPostgreDriverName="postgresql-42.7.5.jar"
+export constMSSQLDriverName="mssql-jdbc-11.2.3.jre8.jar"
+export constAzureCoreVersion="1.34.0"
+export constDbPodIdentitySelector="db-pod-identity" # do not change the value
+export constPreclassDirectoryName="preclassLibraries"
+export constLivenessProbePeriodSeconds=30
+export constLivenessProbeTimeoutSeconds=5
+export constLivenessProbeFailureThreshold=20
+export constReadinessProbeProbePeriodSeconds=10
+export constReadinessProbeTimeoutSeconds=5
+export constReadinessProbeFailureThreshold=3
export curlMaxTime=120 # seconds
export ocrLoginServer="container-registry.oracle.com"
+export ocrGaImagePath="middleware/weblogic"
+export ocrCpuImagePath="middleware/weblogic_cpu"
+export gitUrl4CpuImages="https://raw.githubusercontent.com/oracle/weblogic-azure/556ebc6bfd92679ceeb843f0a1bdea98a06ca608/weblogic-azure-aks/src/main/resources/weblogic_cpu_images.json"
+export gitUrl4AksWellTestedVersionJsonFile="https://raw.githubusercontent.com/oracle/weblogic-azure/556ebc6bfd92679ceeb843f0a1bdea98a06ca608/weblogic-azure-aks/src/main/resources/aks_well_tested_version.json"
+export gitUrl4AksToolingWellTestedVersionJsonFile="https://raw.githubusercontent.com/oracle/weblogic-azure/556ebc6bfd92679ceeb843f0a1bdea98a06ca608/weblogic-azure-aks/src/main/resources/aks_tooling_well_tested_versions.json"
+export gitUrl4WLSToolingFamilyJsonFile="https://raw.githubusercontent.com/oracle/weblogic-azure/556ebc6bfd92679ceeb843f0a1bdea98a06ca608/weblogic-azure-aks/src/main/resources/weblogic_tooling_family.json"
+export gitUrl4AzureIdentityExtensionsPomFile="https://raw.githubusercontent.com/oracle/weblogic-azure/556ebc6bfd92679ceeb843f0a1bdea98a06ca608/weblogic-azure-aks/src/main/resources/azure-identity-extensions.xml"
+export gitUrl4MySQLDriverPomFile="https://raw.githubusercontent.com/oracle/weblogic-azure/556ebc6bfd92679ceeb843f0a1bdea98a06ca608/weblogic-azure-aks/src/main/resources/mysql-connector-java.xml"
+
export optUninstallMaxTry=5 # Max attempts to wait for the operator uninstalled
export optUninstallInterval=10
+export retryMaxAttempt=5 # retry attempt for curl command
+export retryInterval=10
+
export wlsContainerName="weblogic-server"
+export wlsPostgresqlDriverUrl="https://jdbc.postgresql.org/download/postgresql-42.7.5.jar"
+export wlsMSSQLDriverUrl="https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/11.2.3.jre8/mssql-jdbc-11.2.3.jre8.jar"
+export jdkArm64Url="https://aka.ms/download-jdk/microsoft-jdk-11.0.23-linux-aarch64.tar.gz"
diff --git a/weblogic-azure-aks/src/main/arm/scripts/createAppGatewayIngress.sh b/weblogic-azure-aks/src/main/arm/scripts/createAppGatewayIngress.sh
index a4d41688d..49916b55d 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/createAppGatewayIngress.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/createAppGatewayIngress.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
+# Copyright (c) 2021, 2024, Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
# Description: to create Azure Application Gateway ingress for the following targets.
@@ -8,34 +8,26 @@
echo "Script ${0} starts"
-# read from stdin
-function read_sensitive_parameters_from_stdin() {
- read spBase64String appgwFrontendSSLCertPsw
-}
-
function generate_appgw_cluster_config_file_expose_https() {
- clusterIngressHttpsName=${wlsDomainUID}-cluster-appgw-ingress-https-svc
- clusterAppgwIngressHttpsYamlPath=${scriptDir}/appgw-cluster-ingress-https-svc.yaml
- cat <${clusterAppgwIngressHttpsYamlPath}
+ clusterIngressHttpsName=${WLS_DOMAIN_UID}-cluster-appgw-ingress-https-svc
+ clusterAppgwIngressHttpsYamlPath=${scriptDir}/appgw-cluster-ingress-https-svc.yaml
+ cat <${clusterAppgwIngressHttpsYamlPath}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ${clusterIngressHttpsName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constClusterName}"
+ azure.weblogc.createdByWlsOffer: "true"
annotations:
- kubernetes.io/ingress.class: azure/application-gateway
-EOF
-
- if [[ "${enableCookieBasedAffinity,,}" == "true" ]]; then
- cat <>${clusterAppgwIngressHttpsYamlPath}
- appgw.ingress.kubernetes.io/cookie-based-affinity: "true"
-EOF
- fi
-
- cat <>${clusterAppgwIngressHttpsYamlPath}
+ appgw.ingress.kubernetes.io/appgw-ssl-certificate: "${APPGW_SSL_CERT_NAME}"
+ appgw.ingress.kubernetes.io/use-private-ip: "${APPGW_USE_PRIVATE_IP}"
+ appgw.ingress.kubernetes.io/cookie-based-affinity: "${ENABLE_COOKIE_BASED_AFFINITY}"
+ appgw.ingress.kubernetes.io/backend-path-prefix: "/"
spec:
- tls:
- - secretName: ${appgwFrontendSecretName}
+ ingressClassName: azure-application-gateway
rules:
- http:
paths:
@@ -50,26 +42,24 @@ EOF
}
function generate_appgw_cluster_config_file_nossl() {
- clusterIngressName=${wlsDomainUID}-cluster-appgw-ingress-svc
- clusterAppgwIngressYamlPath=${scriptDir}/appgw-cluster-ingress-svc.yaml
- cat <${clusterAppgwIngressYamlPath}
+ clusterIngressName=${WLS_DOMAIN_UID}-cluster-appgw-ingress-svc
+ clusterAppgwIngressYamlPath=${scriptDir}/appgw-cluster-ingress-svc.yaml
+ cat <${clusterAppgwIngressYamlPath}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ${clusterIngressName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constClusterName}"
+ azure.weblogc.createdByWlsOffer: "true"
annotations:
- kubernetes.io/ingress.class: azure/application-gateway
-EOF
-
- if [[ "${enableCookieBasedAffinity,,}" == "true" ]]; then
- cat <>${clusterAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/cookie-based-affinity: "true"
-EOF
- fi
-
- cat <>${clusterAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/use-private-ip: "${APPGW_USE_PRIVATE_IP}"
+ appgw.ingress.kubernetes.io/cookie-based-affinity: "${ENABLE_COOKIE_BASED_AFFINITY}"
+ appgw.ingress.kubernetes.io/backend-path-prefix: "/"
spec:
+ ingressClassName: azure-application-gateway
rules:
- http:
paths:
@@ -84,41 +74,41 @@ EOF
}
function generate_appgw_cluster_config_file_ssl() {
- clusterIngressName=${wlsDomainUID}-cluster-appgw-ingress-svc
- clusterAppgwIngressYamlPath=${scriptDir}/appgw-cluster-ingress-svc.yaml
- cat <${clusterAppgwIngressYamlPath}
+ clusterIngressName=${WLS_DOMAIN_UID}-cluster-appgw-ingress-svc
+ clusterAppgwIngressYamlPath=${scriptDir}/appgw-cluster-ingress-svc.yaml
+ cat <${clusterAppgwIngressYamlPath}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ${clusterIngressName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constClusterName}"
+ azure.weblogc.createdByWlsOffer: "true"
annotations:
- kubernetes.io/ingress.class: azure/application-gateway
appgw.ingress.kubernetes.io/ssl-redirect: "true"
appgw.ingress.kubernetes.io/backend-protocol: "https"
+ appgw.ingress.kubernetes.io/appgw-ssl-certificate: "${APPGW_SSL_CERT_NAME}"
+ appgw.ingress.kubernetes.io/use-private-ip: "${APPGW_USE_PRIVATE_IP}"
+ appgw.ingress.kubernetes.io/cookie-based-affinity: "${ENABLE_COOKIE_BASED_AFFINITY}"
+ appgw.ingress.kubernetes.io/backend-path-prefix: "/"
EOF
- if [[ "${enableCustomDNSAlias,,}" == "true" ]]; then
- cat <>${clusterAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/backend-hostname: "${dnsClusterLabel}.${dnsZoneName}"
-EOF
- else
- cat <>${clusterAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/backend-hostname: "${appgwAlias}"
+ if [[ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]]; then
+ cat <>${clusterAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/backend-hostname: "${DNS_CLUSTER_LABEL}.${DNS_ZONE_NAME}"
EOF
- fi
-
- if [[ "${enableCookieBasedAffinity,,}" == "true" ]]; then
- cat <>${clusterAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/cookie-based-affinity: "true"
+ else
+ cat <>${clusterAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/backend-hostname: "${APPGW_ALIAS}"
EOF
- fi
+ fi
- cat <>${clusterAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/appgw-trusted-root-certificate: "${appgwBackendSecretName}"
+ cat <>${clusterAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/appgw-trusted-root-certificate: "${APPGW_TRUSTED_ROOT_CERT_NAME}"
spec:
- tls:
- - secretName: ${appgwFrontendSecretName}
+ ingressClassName: azure-application-gateway
rules:
- http:
paths:
@@ -133,26 +123,23 @@ EOF
}
function generate_appgw_admin_config_file_nossl() {
- adminIngressName=${wlsDomainUID}-admin-appgw-ingress-svc
- adminAppgwIngressYamlPath=${scriptDir}/appgw-admin-ingress-svc.yaml
- cat <${adminAppgwIngressYamlPath}
+ adminIngressName=${WLS_DOMAIN_UID}-admin-appgw-ingress-svc
+ adminAppgwIngressYamlPath=${scriptDir}/appgw-admin-ingress-svc.yaml
+ cat <${adminAppgwIngressYamlPath}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ${adminIngressName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constAdminServerName}"
+ azure.weblogc.createdByWlsOffer: "true"
annotations:
- kubernetes.io/ingress.class: azure/application-gateway
-EOF
-
- if [[ "${enableCookieBasedAffinity,,}" == "true" ]]; then
- cat <>${adminAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/cookie-based-affinity: "true"
-EOF
- fi
-
- cat <>${adminAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/use-private-ip: "${APPGW_USE_PRIVATE_IP}"
+ appgw.ingress.kubernetes.io/cookie-based-affinity: "${ENABLE_COOKIE_BASED_AFFINITY}"
spec:
+ ingressClassName: azure-application-gateway
rules:
- http:
paths:
@@ -167,25 +154,22 @@ EOF
}
function generate_appgw_admin_remote_config_file_nossl() {
- cat <${adminRemoteAppgwIngressYamlPath}
+ cat <${adminRemoteAppgwIngressYamlPath}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ${adminRemoteIngressName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constAdminServerName}-remote-console"
+ azure.weblogc.createdByWlsOffer: "true"
annotations:
- kubernetes.io/ingress.class: azure/application-gateway
appgw.ingress.kubernetes.io/backend-path-prefix: "/"
-EOF
-
- if [[ "${enableCookieBasedAffinity,,}" == "true" ]]; then
- cat <>${adminRemoteAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/cookie-based-affinity: "true"
-EOF
- fi
-
- cat <>${adminRemoteAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/use-private-ip: "${APPGW_USE_PRIVATE_IP}"
+ appgw.ingress.kubernetes.io/cookie-based-affinity: "${ENABLE_COOKIE_BASED_AFFINITY}"
spec:
+ ingressClassName: azure-application-gateway
rules:
- http:
paths:
@@ -200,42 +184,41 @@ EOF
}
function generate_appgw_admin_config_file_ssl() {
- adminIngressName=${wlsDomainUID}-admin-appgw-ingress-svc
- adminAppgwIngressYamlPath=${scriptDir}/appgw-admin-ingress-svc.yaml
- cat <${adminAppgwIngressYamlPath}
+ adminIngressName=${WLS_DOMAIN_UID}-admin-appgw-ingress-svc
+ adminAppgwIngressYamlPath=${scriptDir}/appgw-admin-ingress-svc.yaml
+ cat <${adminAppgwIngressYamlPath}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ${adminIngressName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constAdminServerName}"
+ azure.weblogc.createdByWlsOffer: "true"
annotations:
- kubernetes.io/ingress.class: azure/application-gateway
appgw.ingress.kubernetes.io/ssl-redirect: "true"
appgw.ingress.kubernetes.io/backend-protocol: "https"
+ appgw.ingress.kubernetes.io/appgw-ssl-certificate: "${APPGW_SSL_CERT_NAME}"
+ appgw.ingress.kubernetes.io/use-private-ip: "${APPGW_USE_PRIVATE_IP}"
+ appgw.ingress.kubernetes.io/cookie-based-affinity: "${ENABLE_COOKIE_BASED_AFFINITY}"
EOF
- if [[ "${enableCustomDNSAlias,,}" == "true" ]]; then
- cat <>${adminAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/backend-hostname: "${dnsAdminLabel}.${dnsZoneName}"
-EOF
- else
- cat <>${adminAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/backend-hostname: "${appgwAlias}"
+ if [[ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]]; then
+ cat <>${adminAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/backend-hostname: "${DNS_ADMIN_LABEL}.${DNS_ZONE_NAME}"
EOF
- fi
-
- if [[ "${enableCookieBasedAffinity,,}" == "true" ]]; then
- cat <>${adminAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/cookie-based-affinity: "true"
+ else
+ cat <>${adminAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/backend-hostname: "${APPGW_ALIAS}"
EOF
- fi
+ fi
- cat <>${adminAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/appgw-trusted-root-certificate: "${appgwBackendSecretName}"
+ cat <>${adminAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/appgw-trusted-root-certificate: "${APPGW_TRUSTED_ROOT_CERT_NAME}"
spec:
- tls:
- - secretName: ${appgwFrontendSecretName}
+ ingressClassName: azure-application-gateway
rules:
- http:
paths:
@@ -250,42 +233,40 @@ EOF
}
function generate_appgw_admin_remote_config_file_ssl() {
- cat <${adminRemoteAppgwIngressYamlPath}
+ cat <${adminRemoteAppgwIngressYamlPath}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ${adminRemoteIngressName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constAdminServerName}-remote-console"
+ azure.weblogc.createdByWlsOffer: "true"
annotations:
- kubernetes.io/ingress.class: azure/application-gateway
appgw.ingress.kubernetes.io/backend-path-prefix: "/"
appgw.ingress.kubernetes.io/ssl-redirect: "true"
appgw.ingress.kubernetes.io/backend-protocol: "https"
-
+ appgw.ingress.kubernetes.io/appgw-ssl-certificate: "${APPGW_SSL_CERT_NAME}"
+ appgw.ingress.kubernetes.io/use-private-ip: "${APPGW_USE_PRIVATE_IP}"
+ appgw.ingress.kubernetes.io/cookie-based-affinity: "${ENABLE_COOKIE_BASED_AFFINITY}"
EOF
- if [[ "${enableCustomDNSAlias,,}" == "true" ]]; then
- cat <>${adminRemoteAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/backend-hostname: "${dnsAdminLabel}.${dnsZoneName}"
+ if [[ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]]; then
+ cat <>${adminRemoteAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/backend-hostname: "${DNS_ADMIN_LABEL}.${DNS_ZONE_NAME}"
EOF
- else
- cat <>${adminRemoteAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/backend-hostname: "${appgwAlias}"
+ else
+ cat <>${adminRemoteAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/backend-hostname: "${APPGW_ALIAS}"
EOF
- fi
+ fi
- if [[ "${enableCookieBasedAffinity,,}" == "true" ]]; then
- cat <>${adminRemoteAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/cookie-based-affinity: "true"
-EOF
- fi
-
- cat <>${adminRemoteAppgwIngressYamlPath}
- appgw.ingress.kubernetes.io/appgw-trusted-root-certificate: "${appgwBackendSecretName}"
+ cat <>${adminRemoteAppgwIngressYamlPath}
+ appgw.ingress.kubernetes.io/appgw-trusted-root-certificate: "${APPGW_TRUSTED_ROOT_CERT_NAME}"
spec:
- tls:
- - secretName: ${appgwFrontendSecretName}
+ ingressClassName: azure-application-gateway
rules:
- http:
paths:
@@ -300,293 +281,265 @@ EOF
}
function query_admin_target_port() {
- if [[ "${enableCustomSSL,,}" == "true" ]]; then
- adminTargetPort=$(utility_query_service_port ${svcAdminServer} ${wlsDomainNS} 'default-secure')
- else
- adminTargetPort=$(utility_query_service_port ${svcAdminServer} ${wlsDomainNS} 'default')
- fi
+ if [[ "${ENABLE_CUSTOM_SSL,,}" == "true" ]]; then
+ adminTargetPort=$(utility_query_service_port ${svcAdminServer} ${wlsDomainNS} 'internal-t3s')
+ else
+ adminTargetPort=$(utility_query_service_port ${svcAdminServer} ${wlsDomainNS} 'internal-t3')
+ fi
- echo "Admin port of ${adminServerName}: ${adminTargetPort}"
+ echo "Admin port of ${adminServerName}: ${adminTargetPort}"
}
# Create network peers for aks and appgw
function network_peers_aks_appgw() {
- # To successfully peer two virtual networks command 'az network vnet peering create' must be called twice with the values
- # for --vnet-name and --remote-vnet reversed.
- aksMCRGName=$(az aks show -n $aksClusterName -g $aksClusterRGName -o tsv --query "nodeResourceGroup")
- ret=$(az group exists -n ${aksMCRGName})
- if [ "${ret,,}" == "false" ]; then
- echo_stderr "AKS namaged resource group ${aksMCRGName} does not exist."
- exit 1
- fi
-
- aksNetWorkId=$(az resource list -g ${aksMCRGName} --resource-type Microsoft.Network/virtualNetworks -o tsv --query '[*].id')
- aksNetworkName=$(az resource list -g ${aksMCRGName} --resource-type Microsoft.Network/virtualNetworks -o tsv --query '[*].name')
- az network vnet peering create \
- --name aks-appgw-peer \
- --remote-vnet ${aksNetWorkId} \
- --resource-group ${curRGName} \
- --vnet-name ${vnetName} \
- --allow-vnet-access
- utility_validate_status "Create network peers for $aksNetWorkId and ${vnetName}."
-
- appgwNetworkId=$(az resource list -g ${curRGName} --name ${vnetName} -o tsv --query '[*].id')
+ # To successfully peer two virtual networks command 'az network vnet peering create' must be called twice with the values
+ # for --vnet-name and --remote-vnet reversed.
+
+ local aksMCRGName=$(az aks show -n $AKS_CLUSTER_NAME -g $AKS_CLUSTER_RG_NAME -o tsv --query "nodeResourceGroup")
+ local ret=$(az group exists -n ${aksMCRGName})
+ if [ "${ret,,}" == "false" ]; then
+    echo_stderr "AKS managed resource group ${aksMCRGName} does not exist."
+ exit 1
+ fi
+
+ # query vnet from managed resource group
+ local aksNetWorkId=$(az resource list -g ${aksMCRGName} --resource-type Microsoft.Network/virtualNetworks -o tsv --query '[*].id')
+
+ # no vnet in managed resource group, then query vnet from aks agent
+ if [ -z "${aksNetWorkId}" ]; then
+ # assume all the agent pools are in the same vnet
+ # e.g. /subscriptions/xxxx-xxxx-xxxx-xxxx/resourceGroups/foo-rg/providers/Microsoft.Network/virtualNetworks/foo-aks-vnet/subnets/default
+ local aksAgent1Subnet=$(az aks show -n $AKS_CLUSTER_NAME -g $AKS_CLUSTER_RG_NAME | jq '.agentPoolProfiles[0] | .vnetSubnetId' | tr -d "\"")
+ utility_validate_status "Get subnet id of aks agent 0."
+ aksNetWorkId=${aksAgent1Subnet%\/subnets\/*}
+ fi
+
+ local aksNetworkName=${aksNetWorkId#*\/virtualNetworks\/}
+ local aksNetworkRgName=${aksNetWorkId#*\/resourceGroups\/}
+ local aksNetworkRgName=${aksNetworkRgName%\/providers\/*}
+
+ local appGatewaySubnetId=$(az network application-gateway show -g ${CURRENT_RG_NAME} --name ${APPGW_NAME} -o tsv --query "gatewayIPConfigurations[0].subnet.id")
+ local appGatewayVnetResourceGroup=$(az network application-gateway show -g ${CURRENT_RG_NAME} --name ${APPGW_NAME} -o tsv --query "gatewayIPConfigurations[0].subnet.resourceGroup")
+ local appgwNetworkId=${appGatewaySubnetId%\/subnets\/*}
+ local appgwVnetName=$(az resource show --ids ${appgwNetworkId} --query "name" -o tsv)
+
+ local toPeer=true
+ # if the AKS and App Gateway have the same VNET, need not peer.
+ if [ "${aksNetWorkId}" == "${appgwNetworkId}" ]; then
+ echo_stdout "AKS and Application Gateway are in the same virtual network: ${appgwNetworkId}."
+ toPeer=false
+ fi
+
+ # check if the Vnets have been peered.
+ local ret=$(az network vnet peering list \
+ --resource-group ${appGatewayVnetResourceGroup} \
+ --vnet-name ${appgwVnetName} -o json |
+ jq ".[] | select(.remoteVirtualNetwork.id==\"${aksNetWorkId}\")")
+ if [ -n "$ret" ]; then
+ echo_stdout "VNET of AKS ${aksNetWorkId} and Application Gateway ${appgwNetworkId} is peering."
+ toPeer=false
+ fi
+
+ if [ "${toPeer}" == "true" ]; then
az network vnet peering create \
- --name aks-appgw-peer \
- --remote-vnet ${appgwNetworkId} \
- --resource-group ${aksMCRGName} \
- --vnet-name ${aksNetworkName} \
- --allow-vnet-access
-
- utility_validate_status "Create network peers for $aksNetWorkId and ${vnetName}."
+ --name aks-appgw-peer \
+ --remote-vnet ${aksNetWorkId} \
+ --resource-group ${appGatewayVnetResourceGroup} \
+ --vnet-name ${appgwVnetName} \
+ --allow-vnet-access
+ utility_validate_status "Create network peers for $aksNetWorkId and ${appgwNetworkId}."
- # For Kbectl network plugin: https://azure.github.io/application-gateway-kubernetes-ingress/how-tos/networking/#with-kubenet
- # find route table used by aks cluster
+ az network vnet peering create \
+ --name aks-appgw-peer \
+ --remote-vnet ${appgwNetworkId} \
+ --resource-group ${aksNetworkRgName} \
+ --vnet-name ${aksNetworkName} \
+ --allow-vnet-access
+
+ utility_validate_status "Complete creating network peers for $aksNetWorkId and ${appgwNetworkId}."
+ fi
+
+ # For kubenet network plugin: https://azure.github.io/application-gateway-kubernetes-ingress/how-tos/networking/#with-kubenet
+ # find route table used by aks cluster
+ local networkPlugin=$(az aks show -n $AKS_CLUSTER_NAME -g $AKS_CLUSTER_RG_NAME --query "networkProfile.networkPlugin" -o tsv)
+ if [[ "${networkPlugin}" == "kubenet" ]]; then
+ # the route table is in MC_ resource group
routeTableId=$(az network route-table list -g $aksMCRGName --query "[].id | [0]" -o tsv)
- # get the application gateway's subnet
- appGatewaySubnetId=$(az network application-gateway show -n $appgwName -g $curRGName -o tsv --query "gatewayIpConfigurations[0].subnet.id")
-
# associate the route table to Application Gateway's subnet
az network vnet subnet update \
--ids $appGatewaySubnetId \
--route-table $routeTableId
utility_validate_status "Associate the route table ${routeTableId} to Application Gateway's subnet ${appGatewaySubnetId}"
+ fi
}
function query_cluster_target_port() {
- if [[ "${enableCustomSSL,,}" == "true" ]]; then
- clusterTargetPort=$(utility_query_service_port ${svcCluster} ${wlsDomainNS} 'default-secure')
- else
- clusterTargetPort=$(utility_query_service_port ${svcCluster} ${wlsDomainNS} 'default')
- fi
+ if [[ "${ENABLE_CUSTOM_SSL,,}" == "true" ]]; then
+ clusterTargetPort=$(utility_query_service_port ${svcCluster} ${wlsDomainNS} 'default-secure')
+ else
+ clusterTargetPort=$(utility_query_service_port ${svcCluster} ${wlsDomainNS} 'default')
+ fi
- echo "Cluster port of ${clusterName}: ${clusterTargetPort}"
+ echo "Cluster port of ${clusterName}: ${clusterTargetPort}"
}
-function install_azure_ingress() {
- # create sa and bind cluster-admin role
- # grant azure ingress permission to access WebLogic service
- kubectl apply -f ${scriptDir}/appgw-ingress-clusterAdmin-roleBinding.yaml
-
- install_helm
- helm repo add application-gateway-kubernetes-ingress ${appgwIngressHelmRepo}
- helm repo update
-
- # generate Helm config for azure ingress
- customAppgwHelmConfig=${scriptDir}/appgw-helm-config.yaml
- cp ${scriptDir}/appgw-helm-config.yaml.template ${customAppgwHelmConfig}
- subID=${subID#*\/subscriptions\/}
- sed -i -e "s:@SUB_ID@:${subID}:g" ${customAppgwHelmConfig}
- sed -i -e "s:@APPGW_RG_NAME@:${curRGName}:g" ${customAppgwHelmConfig}
- sed -i -e "s:@APPGW_NAME@:${appgwName}:g" ${customAppgwHelmConfig}
- sed -i -e "s:@WATCH_NAMESPACE@:${wlsDomainNS}:g" ${customAppgwHelmConfig}
- sed -i -e "s:@SP_ENCODING_CREDENTIALS@:${spBase64String}:g" ${customAppgwHelmConfig}
-
- helm install ingress-azure \
- -f ${customAppgwHelmConfig} \
- application-gateway-kubernetes-ingress/ingress-azure \
- --version ${azureAppgwIngressVersion}
-
- utility_validate_status "Install app gateway ingress controller."
-
- attempts=0
- podState="running"
- while [ "$podState" == "running" ] && [ $attempts -lt ${checkPodStatusMaxAttemps} ]; do
- podState="completed"
- attempts=$((attempts + 1))
- echo Waiting for Pod running...${attempts}
- sleep ${checkPodStatusInterval}
-
- ret=$(kubectl get pod -o json |
- jq '.items[] | .status.containerStatuses[] | select(.name=="ingress-azure") | .ready')
- if [[ "${ret}" == "false" ]]; then
- podState="running"
- fi
- done
-
- if [ "$podState" == "running" ] && [ $attempts -ge ${checkPodStatusMaxAttemps} ]; then
- echo_stderr "Failed to install app gateway ingress controller."
- exit 1
- fi
+function generate_appgw_cluster_config_file() {
+ if [[ "${ENABLE_CUSTOM_SSL,,}" == "true" ]]; then
+ generate_appgw_cluster_config_file_ssl
+ else
+ generate_appgw_cluster_config_file_nossl
+ generate_appgw_cluster_config_file_expose_https
+ fi
}
-function output_create_gateway_ssl_k8s_secret() {
- echo "export gateway frontend certificates"
- echo "$appgwFrontendSSLCertData" | base64 -d >${scriptDir}/$appgwFrontCertFileName
-
- appgwFrontendSSLCertPassin=${appgwFrontendSSLCertPsw}
- if [[ "$appgwCertificateOption" == "${appgwSelfsignedCert}" ]]; then
- appgwFrontendSSLCertPassin="" # empty password
- fi
-
- openssl pkcs12 \
- -in ${scriptDir}/$appgwFrontCertFileName \
- -nocerts \
- -out ${scriptDir}/$appgwFrontCertKeyFileName \
- -passin pass:${appgwFrontendSSLCertPassin} \
- -passout pass:${appgwFrontendSSLCertPsw}
-
- utility_validate_status "Export key from frontend certificate."
-
- openssl rsa -in ${scriptDir}/$appgwFrontCertKeyFileName \
- -out ${scriptDir}/$appgwFrontCertKeyDecrytedFileName \
- -passin pass:${appgwFrontendSSLCertPsw}
-
- utility_validate_status "Decryte private key."
-
- openssl pkcs12 \
- -in ${scriptDir}/$appgwFrontCertFileName \
- -clcerts \
- -nokeys \
- -out ${scriptDir}/$appgwFrontPublicCertFileName \
- -passin pass:${appgwFrontendSSLCertPassin}
-
- utility_validate_status "Export cert from frontend certificate."
-
- echo "create k8s tsl secret for app gateway frontend ssl termination"
- kubectl -n ${wlsDomainNS} create secret tls ${appgwFrontendSecretName} \
- --key="${scriptDir}/$appgwFrontCertKeyDecrytedFileName" \
- --cert="${scriptDir}/$appgwFrontPublicCertFileName"
-
- utility_validate_status "create k8s tsl secret for app gateway frontend ssl termination."
+function generate_appgw_admin_config_file() {
+ if [[ "${ENABLE_CUSTOM_SSL,,}" == "true" ]]; then
+ generate_appgw_admin_config_file_ssl
+ else
+ generate_appgw_admin_config_file_nossl
+ fi
}
-function validate_backend_ca_cert() {
- az network application-gateway root-cert list \
- --gateway-name $appgwName \
- --resource-group $curRGName |
- jq '.[] | .name' | grep "${appgwBackendSecretName}"
-
- utility_validate_status "check if backend cert exists."
+function generate_appgw_admin_remote_config_file() {
+ if [[ "${ENABLE_CUSTOM_SSL,,}" == "true" ]]; then
+ generate_appgw_admin_remote_config_file_ssl
+ else
+ generate_appgw_admin_remote_config_file_nossl
+ fi
}
-function generate_appgw_cluster_config_file() {
- if [[ "${enableCustomSSL,,}" == "true" ]]; then
- generate_appgw_cluster_config_file_ssl
- else
- generate_appgw_cluster_config_file_nossl
- generate_appgw_cluster_config_file_expose_https
- fi
-}
+# Currently, ingress controller does not have a tag that identifies it's ready to create ingress.
+# This function is to create an ingress and check it's status. If the ingress is not available, then re-create it again.
+function waitfor_agic_ready_and_create_ingress() {
+ local svcName=$1
+ local ymlFilePath=$2
+
+ local ready=false
+ local attempt=0
+ while [[ "${ready}" == "false" && $attempt -lt ${checkAGICStatusMaxAttempt} ]]; do
+ echo "Waiting for AGIC ready... ${attempt}"
+ attempt=$((attempt + 1))
+ kubectl apply -f ${ymlFilePath}
+
+ # wait for the ingress ready, if the ingress is not available then delete it
+ local svcAttempts=0
+ local svcState="running"
+ while [ "$svcState" == "running" ] && [ $svcAttempts -lt ${checkIngressStateMaxAttempt} ]; do
+ svcAttempts=$((svcAttempts + 1))
+            echo Waiting for ingress completed...${svcAttempts}
+ sleep ${checkSVCInterval}
+
+ ip=$(kubectl get ingress ${svcName} -n ${wlsDomainNS} -o json |
+ jq '.status.loadBalancer.ingress[0].ip')
+ echo "ip: ${ip}"
+ if [[ "${ip}" != "null" ]]; then
+ svcState="completed"
+ ready=true
+ fi
+ done
-function generate_appgw_admin_config_file() {
- if [[ "${enableCustomSSL,,}" == "true" ]]; then
- generate_appgw_admin_config_file_ssl
- else
- generate_appgw_admin_config_file_nossl
+ if [[ "${ready}" == "false" ]]; then
+ kubectl delete -f ${ymlFilePath}
+ sleep ${checkAGICStatusInterval}
fi
-}
+ done
+
+ if [ ${attempt} -ge ${checkAGICStatusMaxAttempt} ]; then
+        echo_stderr "azure ingress is not ready to create ingress."
+ exit 1
+ fi
-function generate_appgw_admin_remote_config_file() {
- if [[ "${enableCustomSSL,,}" == "true" ]]; then
- generate_appgw_admin_remote_config_file_ssl
- else
- generate_appgw_admin_remote_config_file_nossl
- fi
}
function appgw_ingress_svc_for_cluster() {
- # generate ingress svc config for cluster
- generate_appgw_cluster_config_file
- kubectl apply -f ${clusterAppgwIngressYamlPath}
- utility_validate_status "Create appgw ingress svc."
+ # generate ingress svc config for cluster
+ generate_appgw_cluster_config_file
+ kubectl apply -f ${clusterAppgwIngressYamlPath}
+ utility_validate_status "Create appgw ingress svc."
+ waitfor_agic_ready_and_create_ingress \
+ ${clusterIngressName} \
+ ${clusterAppgwIngressYamlPath}
+
+ # expose https for cluster if e2e ssl is not set up.
+ if [[ "${ENABLE_CUSTOM_SSL,,}" != "true" ]]; then
+ kubectl apply -f ${clusterAppgwIngressHttpsYamlPath}
+ utility_validate_status "Create appgw ingress https svc."
utility_waitfor_ingress_completed \
- ${clusterIngressName} \
- ${wlsDomainNS} \
- ${checkSVCStateMaxAttempt} \
- ${checkSVCInterval}
-
- # expose https for cluster if e2e ssl is not set up.
- if [[ "${enableCustomSSL,,}" != "true" ]]; then
- kubectl apply -f ${clusterAppgwIngressHttpsYamlPath}
- utility_validate_status "Create appgw ingress https svc."
- utility_waitfor_ingress_completed \
- ${clusterIngressHttpsName} \
- ${wlsDomainNS} \
- ${checkSVCStateMaxAttempt} \
- ${checkSVCInterval}
- fi
+ ${clusterIngressHttpsName} \
+ ${wlsDomainNS} \
+ ${checkSVCStateMaxAttempt} \
+ ${checkSVCInterval}
+ fi
}
function appgw_ingress_svc_for_admin_server() {
- generate_appgw_admin_config_file
- kubectl apply -f ${adminAppgwIngressYamlPath}
- utility_validate_status "Create appgw ingress svc."
- utility_waitfor_lb_svc_completed \
- ${adminIngressName} \
- ${wlsDomainNS} \
- ${checkSVCStateMaxAttempt} \
- ${checkSVCInterval}
+ generate_appgw_admin_config_file
+ kubectl apply -f ${adminAppgwIngressYamlPath}
+ utility_validate_status "Create appgw ingress svc."
+ utility_waitfor_ingress_completed \
+ ${adminIngressName} \
+ ${wlsDomainNS} \
+ ${checkSVCStateMaxAttempt} \
+ ${checkSVCInterval}
}
function appgw_ingress_svc_for_remote_console() {
- adminRemoteIngressName=${wlsDomainUID}-admin-remote-appgw-ingress-svc
- adminRemoteAppgwIngressYamlPath=${scriptDir}/appgw-admin-remote-ingress-svc.yaml
- generate_appgw_admin_remote_config_file
-
- kubectl apply -f ${adminRemoteAppgwIngressYamlPath}
- utility_validate_status "Create appgw ingress svc."
- utility_waitfor_lb_svc_completed \
- ${adminRemoteIngressName} \
- ${wlsDomainNS} \
- ${checkSVCStateMaxAttempt} \
- ${checkSVCInterval}
+ adminRemoteIngressName=${WLS_DOMAIN_UID}-admin-remote-appgw-ingress-svc
+ adminRemoteAppgwIngressYamlPath=${scriptDir}/appgw-admin-remote-ingress-svc.yaml
+ generate_appgw_admin_remote_config_file
+
+ kubectl apply -f ${adminRemoteAppgwIngressYamlPath}
+ utility_validate_status "Create appgw ingress svc."
+ utility_waitfor_ingress_completed \
+ ${adminRemoteIngressName} \
+ ${wlsDomainNS} \
+ ${checkSVCStateMaxAttempt} \
+ ${checkSVCInterval}
}
function create_dns_record() {
- if [[ "${enableCustomDNSAlias,,}" == "true" ]]; then
- create_dns_CNAME_record \
- ${appgwAlias} \
- ${dnsClusterLabel} \
- ${dnsRGName} \
- ${dnsZoneName}
- fi
-
- if [[ "${enableCustomDNSAlias,,}" == "true" ]] &&
- [[ "${appgwForAdminServer,,}" == "true" ]]; then
- create_dns_CNAME_record \
- ${appgwAlias} \
- ${dnsAdminLabel} \
- ${dnsRGName} \
- ${dnsZoneName}
- fi
+ if [[ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]]; then
+ create_dns_CNAME_record \
+ ${APPGW_ALIAS} \
+ ${DNS_CLUSTER_LABEL} \
+ ${DNS_ZONE_RG_NAME} \
+ ${DNS_ZONE_NAME}
+ fi
+
+ if [[ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]] &&
+ [[ "${APPGW_FOR_ADMIN_SERVER,,}" == "true" ]]; then
+ create_dns_CNAME_record \
+ ${APPGW_ALIAS} \
+ ${DNS_ADMIN_LABEL} \
+ ${DNS_ZONE_RG_NAME} \
+ ${DNS_ZONE_NAME}
+ fi
}
function create_gateway_ingress() {
- # query admin server port used for non-ssl or ssl
- query_admin_target_port
- # query cluster port used for non-ssl or ssl
- query_cluster_target_port
- # create network peers between gateway vnet and aks vnet
- network_peers_aks_appgw
- # install azure ingress controllor
- install_azure_ingress
- # create tsl/ssl frontend secrets
- output_create_gateway_ssl_k8s_secret
-
- # validate backend CA certificate
- # the certificate has been upload to Application Gateway in
- # weblogic-azure-aks\src\main\bicep\modules\networking.bicep
- if [[ "${enableCustomSSL,,}" == "true" ]]; then
- validate_backend_ca_cert
- fi
-
- # create ingress svc for cluster
- appgw_ingress_svc_for_cluster
-
- # create ingress svc for admin console
- if [[ "${appgwForAdminServer,,}" == "true" ]]; then
- appgw_ingress_svc_for_admin_server
- fi
-
- # create ingress svc for admin remote console
- if [[ "${enableRemoteConsole,,}" == "true" ]]; then
- appgw_ingress_svc_for_remote_console
- fi
-
- create_dns_record
+ # query admin server port used for non-ssl or ssl
+ query_admin_target_port
+ # query cluster port used for non-ssl or ssl
+ query_cluster_target_port
+ # create network peers between gateway vnet and aks vnet
+ network_peers_aks_appgw
+
+ # create ingress svc for cluster
+ appgw_ingress_svc_for_cluster
+
+ # create ingress svc for admin console
+ if [[ "${APPGW_FOR_ADMIN_SERVER,,}" == "true" ]]; then
+ appgw_ingress_svc_for_admin_server
+ fi
+
+ # create ingress svc for admin remote console
+ if [[ "${APPGW_FOR_REMOTE_CONSOLE,,}" == "true" ]]; then
+ appgw_ingress_svc_for_remote_console
+ fi
+
+ create_dns_record
}
# Initialize
@@ -597,43 +550,13 @@ source ${scriptDir}/common.sh
source ${scriptDir}/utility.sh
source ${scriptDir}/createDnsRecord.sh
-aksClusterRGName=$1
-aksClusterName=$2
-wlsDomainUID=$3
-subID=$4
-curRGName=$5
-appgwName=$6
-vnetName=$7
-appgwForAdminServer=$8
-enableCustomDNSAlias=$9
-dnsRGName=${10}
-dnsZoneName=${11}
-dnsAdminLabel=${12}
-dnsClusterLabel=${13}
-appgwAlias=${14}
-appgwFrontendSSLCertData=${15}
-appgwCertificateOption=${16}
-enableCustomSSL=${17}
-enableCookieBasedAffinity=${18}
-enableRemoteConsole=${19}
+set -Eo pipefail
adminServerName=${constAdminServerName} # define in common.sh
-appgwIngressHelmRepo="https://appgwingress.blob.core.windows.net/ingress-azure-helm-package/"
-appgwFrontCertFileName="appgw-frontend-cert.pfx"
-appgwFrontCertKeyDecrytedFileName="appgw-frontend-cert.key"
-appgwFrontCertKeyFileName="appgw-frontend-cert-decryted.key"
-appgwFrontPublicCertFileName="appgw-frontend-cert.crt"
-appgwFrontendSecretName="frontend-tls"
-appgwBackendSecretName="backend-tls"
-appgwSelfsignedCert="generateCert"
-azureAppgwIngressVersion="1.4.0"
+azureAppgwIngressVersion="1.5.1"
clusterName=${constClusterName}
-httpsListenerName="myHttpsListenerName$(date +%s)"
-httpsRuleName="myHttpsRule$(date +%s)"
-svcAdminServer="${wlsDomainUID}-${adminServerName}"
-svcCluster="${wlsDomainUID}-cluster-${clusterName}"
-wlsDomainNS="${wlsDomainUID}-ns"
-
-read_sensitive_parameters_from_stdin
+svcAdminServer="${WLS_DOMAIN_UID}-${adminServerName}"
+svcCluster="${WLS_DOMAIN_UID}-cluster-${clusterName}"
+wlsDomainNS="${WLS_DOMAIN_UID}-ns"
create_gateway_ingress
diff --git a/weblogic-azure-aks/src/main/arm/scripts/createLbSvc.sh b/weblogic-azure-aks/src/main/arm/scripts/createLbSvc.sh
index 7f22943c4..bb58a6c07 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/createLbSvc.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/createLbSvc.sh
@@ -1,4 +1,4 @@
-# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
+# Copyright (c) 2021, 2024, Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
# Description: to create Load Balancer Service for the following targets.
@@ -8,7 +8,7 @@
# * [Optional] cluster T3 channel
#
# Special parameter example:
-# * lbSvcValues: [{"colName":"admin-t3","colTarget":"adminServerT3","colPort":"7005"},{"colName":"cluster","colTarget":"cluster1T3","colPort":"8011"}]
+# * LB_SVC_VALUES: [{"colName":"admin-t3","colTarget":"adminServerT3","colPort":"7005"},{"colName":"cluster","colTarget":"cluster1T3","colPort":"8011"}]
echo "Script ${0} starts"
@@ -19,10 +19,14 @@ kind: Service
metadata:
name: ${adminServerLBSVCName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constAdminServerName}"
+ azure.weblogc.createdByWlsOffer: "true"
EOF
# to create internal load balancer service
- if [[ "${enableInternalLB,,}" == "true" ]]; then
+ if [[ "${USE_INTERNAL_LB,,}" == "true" ]]; then
cat <>${scriptDir}/admin-server-lb.yaml
annotations:
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
@@ -37,7 +41,7 @@ spec:
protocol: TCP
targetPort: ${adminTargetPort}
selector:
- weblogic.domainUID: ${wlsDomainUID}
+ weblogic.domainUID: ${WLS_DOMAIN_UID}
weblogic.serverName: ${adminServerName}
sessionAffinity: None
type: LoadBalancer
@@ -51,10 +55,14 @@ kind: Service
metadata:
name: ${adminServerT3LBSVCName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constAdminServerName}-t3-channel"
+ azure.weblogc.createdByWlsOffer: "true"
EOF
# to create internal load balancer service
- if [[ "${enableInternalLB,,}" == "true" ]]; then
+ if [[ "${USE_INTERNAL_LB,,}" == "true" ]]; then
cat <>${adminServerT3LBDefinitionPath}
annotations:
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
@@ -69,7 +77,7 @@ spec:
protocol: TCP
targetPort: ${adminT3Port}
selector:
- weblogic.domainUID: ${wlsDomainUID}
+ weblogic.domainUID: ${WLS_DOMAIN_UID}
weblogic.serverName: ${adminServerName}
sessionAffinity: None
type: LoadBalancer
@@ -83,10 +91,14 @@ kind: Service
metadata:
name: ${clusterLBSVCName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constClusterName}"
+ azure.weblogc.createdByWlsOffer: "true"
EOF
# to create internal load balancer service
- if [[ "${enableInternalLB,,}" == "true" ]]; then
+ if [[ "${USE_INTERNAL_LB,,}" == "true" ]]; then
cat <>${scriptDir}/cluster-lb.yaml
annotations:
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
@@ -101,7 +113,7 @@ spec:
protocol: TCP
targetPort: ${clusterTargetPort}
selector:
- weblogic.domainUID: ${wlsDomainUID}
+ weblogic.domainUID: ${WLS_DOMAIN_UID}
weblogic.clusterName: ${clusterName}
sessionAffinity: None
type: LoadBalancer
@@ -115,10 +127,14 @@ kind: Service
metadata:
name: ${clusterT3LBSVCName}
namespace: ${wlsDomainNS}
+ labels:
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
+ azure.weblogic.target: "${constClusterName}-t3-channel"
+ azure.weblogc.createdByWlsOffer: "true"
EOF
# to create internal load balancer service
- if [[ "${enableInternalLB,,}" == "true" ]]; then
+ if [[ "${USE_INTERNAL_LB,,}" == "true" ]]; then
cat <>${clusterT3LBDefinitionPath}
annotations:
service.beta.kubernetes.io/azure-load-balancer-internal: "true"
@@ -133,7 +149,7 @@ spec:
protocol: TCP
targetPort: ${clusterT3Port}
selector:
- weblogic.domainUID: ${wlsDomainUID}
+ weblogic.domainUID: ${WLS_DOMAIN_UID}
weblogic.clusterName: ${clusterName}
sessionAffinity: None
type: LoadBalancer
@@ -141,17 +157,17 @@ EOF
}
function query_admin_target_port() {
- if [[ "${enableCustomSSL,,}" == "true" ]]; then
- adminTargetPort=$(utility_query_service_port ${svcAdminServer} ${wlsDomainNS} 'default-secure')
+ if [[ "${ENABLE_CUSTOM_SSL,,}" == "true" ]]; then
+ adminTargetPort=$(utility_query_service_port ${svcAdminServer} ${wlsDomainNS} 'internal-t3s')
else
- adminTargetPort=$(utility_query_service_port ${svcAdminServer} ${wlsDomainNS} 'default')
+ adminTargetPort=$(utility_query_service_port ${svcAdminServer} ${wlsDomainNS} 'internal-t3')
fi
echo "Admin port of ${adminServerName}: ${adminTargetPort}"
}
function query_cluster_target_port() {
- if [[ "${enableCustomSSL,,}" == "true" ]]; then
+ if [[ "${ENABLE_CUSTOM_SSL,,}" == "true" ]]; then
clusterTargetPort=$(utility_query_service_port ${svcCluster} ${wlsDomainNS} 'default-secure')
else
clusterTargetPort=$(utility_query_service_port ${svcCluster} ${wlsDomainNS} 'default')
@@ -181,9 +197,9 @@ function create_lb_svc_for_admin_server_default_channel() {
adminServerEndpoint=$(kubectl get svc ${adminServerLBSVCName} -n ${wlsDomainNS} \
-o=jsonpath='{.status.loadBalancer.ingress[0].ip}:{.spec.ports[0].port}')
- if [ "${enableCustomDNSAlias,,}" == "true" ]; then
- create_dns_A_record "${adminServerEndpoint%%:*}" ${dnsAdminLabel} ${dnsRGName} ${dnsZoneName}
- adminServerEndpoint="${dnsAdminLabel}.${dnsZoneName}:${adminServerEndpoint#*:}"
+ if [ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]; then
+ create_dns_A_record "${adminServerEndpoint%%:*}" ${DNS_ADMIN_LABEL} ${DNS_ZONE_RG_NAME} ${DNS_ZONE_NAME}
+ adminServerEndpoint="${DNS_ADMIN_LABEL}.${DNS_ZONE_NAME}:${adminServerEndpoint#*:}"
fi
adminConsoleEndpoint="${adminServerEndpoint}/console"
@@ -210,9 +226,9 @@ function create_lb_svc_for_admin_t3_channel() {
adminServerT3Endpoint=$(kubectl get svc ${adminServerT3LBSVCName} -n ${wlsDomainNS} \
-o=jsonpath='{.status.loadBalancer.ingress[0].ip}:{.spec.ports[0].port}')
- if [ "${enableCustomDNSAlias,,}" == "true" ]; then
- create_dns_A_record "${adminServerT3Endpoint%%:*}" "${dnszoneAdminT3ChannelLabel}" ${dnsRGName} ${dnsZoneName}
- adminServerT3Endpoint="${dnszoneAdminT3ChannelLabel}.${dnsZoneName}:${adminServerT3Endpoint#*:}"
+ if [ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]; then
+ create_dns_A_record "${adminServerT3Endpoint%%:*}" "${DNS_ADMIN_T3_LABEL}" ${DNS_ZONE_RG_NAME} ${DNS_ZONE_NAME}
+ adminServerT3Endpoint="${DNS_ADMIN_T3_LABEL}.${DNS_ZONE_NAME}:${adminServerT3Endpoint#*:}"
fi
}
@@ -234,9 +250,9 @@ function create_lb_svc_for_cluster_default_channel() {
clusterEndpoint=$(kubectl get svc ${clusterLBSVCName} -n ${wlsDomainNS} -o=jsonpath='{.status.loadBalancer.ingress[0].ip}:{.spec.ports[0].port}')
- if [ "${enableCustomDNSAlias,,}" == "true" ]; then
- create_dns_A_record "${clusterEndpoint%%:*}" ${dnsClusterLabel} ${dnsRGName} ${dnsZoneName}
- clusterEndpoint="${dnsClusterLabel}.${dnsZoneName}:${clusterEndpoint#*:}"
+ if [ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]; then
+ create_dns_A_record "${clusterEndpoint%%:*}" ${DNS_CLUSTER_LABEL} ${DNS_ZONE_RG_NAME} ${DNS_ZONE_NAME}
+ clusterEndpoint="${DNS_CLUSTER_LABEL}.${DNS_ZONE_NAME}:${clusterEndpoint#*:}"
fi
}
@@ -260,16 +276,16 @@ function create_lb_svc_for_cluster_t3_channel() {
clusterT3Endpoint=$(kubectl get svc ${clusterT3LBSVCName} -n ${wlsDomainNS} \
-o=jsonpath='{.status.loadBalancer.ingress[0].ip}:{.spec.ports[0].port}')
- if [ "${enableCustomDNSAlias,,}" == "true" ]; then
- create_dns_A_record "${clusterT3Endpoint%%:*}" ${dnszoneClusterT3ChannelLabel} ${dnsRGName} ${dnsZoneName}
- clusterT3Endpoint="${dnszoneClusterT3ChannelLabel}.${dnsZoneName}:${clusterT3Endpoint#*:}"
+ if [ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]; then
+ create_dns_A_record "${clusterT3Endpoint%%:*}" ${DNS_CLUSTER_T3_LABEL} ${DNS_ZONE_RG_NAME} ${DNS_ZONE_NAME}
+ clusterT3Endpoint="${DNS_CLUSTER_T3_LABEL}.${DNS_ZONE_NAME}:${clusterT3Endpoint#*:}"
fi
}
function patch_admin_t3_public_address() {
# patch admin t3 public address
- if [ "${enableCustomDNSAlias,,}" == "true" ]; then
- adminT3Address="${dnszoneAdminT3ChannelLabel}.${dnsZoneName}"
+ if [ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]; then
+ adminT3Address="${DNS_ADMIN_T3_LABEL}.${DNS_ZONE_NAME}"
else
adminT3Address=$(kubectl -n ${wlsDomainNS} get svc ${adminServerT3LBSVCName} -o json |
jq '. | .status.loadBalancer.ingress[0].ip' |
@@ -289,8 +305,8 @@ function patch_admin_t3_public_address() {
function patch_cluster_t3_public_address() {
#patch cluster t3 pubilc address
- if [ "${enableCustomDNSAlias,,}" == "true" ]; then
- clusterT3Adress="${dnszoneClusterT3ChannelLabel}.${dnsZoneName}"
+ if [ "${ENABLE_DNS_CONFIGURATION,,}" == "true" ]; then
+ clusterT3Adress="${DNS_CLUSTER_T3_LABEL}.${DNS_ZONE_NAME}"
else
clusterT3Adress=$(kubectl -n ${wlsDomainNS} get svc ${clusterT3LBSVCName} -o json |
jq '. | .status.loadBalancer.ingress[0].ip' |
@@ -310,7 +326,7 @@ function patch_cluster_t3_public_address() {
function rolling_update_with_t3_public_address() {
timestampBeforePatchingDomain=$(date +%s)
- currentDomainConfig=$(kubectl -n ${wlsDomainNS} get domain ${wlsDomainUID} -o json)
+ currentDomainConfig=$(kubectl -n ${wlsDomainNS} get domain ${WLS_DOMAIN_UID} -o json)
cat <${scriptDir}/domainPreviousConfiguration.yaml
${currentDomainConfig}
EOF
@@ -326,7 +342,7 @@ EOF
if [[ "${enableClusterT3Channel,,}" == "true" ]] || [[ "${enableAdminT3Channel,,}" == "true" ]]; then
# restart cluster
- restartVersion=$(kubectl -n ${wlsDomainNS} get domain ${wlsDomainUID} -o json |
+ restartVersion=$(kubectl -n ${wlsDomainNS} get domain ${WLS_DOMAIN_UID} -o json |
jq '. | .spec.restartVersion' |
tr -d "\"")
restartVersion=$((restartVersion + 1))
@@ -343,14 +359,15 @@ ${currentDomainConfig}
EOF
echo ${currentDomainConfig} | kubectl -n ${wlsDomainNS} apply -f -
- replicas=$(kubectl -n ${wlsDomainNS} get domain ${wlsDomainUID} -o json |
- jq '. | .spec.clusters[] | .replicas')
+ local clusterName=$(kubectl get cluster -n ${wlsDomainNS} -o json | jq -r '.items[0].metadata.name')
+ local replicas=$(kubectl -n ${wlsDomainNS} get cluster ${clusterName} -o json \
+ | jq '. | .spec.replicas')
# wait for the restart completed.
utility_wait_for_pod_restarted \
${timestampBeforePatchingDomain} \
${replicas} \
- ${wlsDomainUID} \
+ ${WLS_DOMAIN_UID} \
${checkPodStatusMaxAttemps} \
${checkPodStatusInterval}
@@ -373,10 +390,10 @@ function validate_admin_console_url() {
fi
adminTargetPort=$(kubectl get svc ${svcAdminServer} -n ${wlsDomainNS} -o json |
- jq '.spec.ports[] | select(.name=="default") | .port')
+ jq '.spec.ports[] | select(.name=="internal-t3") | .port')
local adminConsoleUrl="http://${svcAdminServer}.${wlsDomainNS}:${adminTargetPort}/console/"
- kubectl exec -it ${podName} -n ${wlsDomainNS} -c ${wlsContainerName} \
+ kubectl exec ${podName} -n ${wlsDomainNS} -c ${wlsContainerName} \
-- bash -c 'curl --write-out "%{http_code}\n" --silent --output /dev/null "'${adminConsoleUrl}'" | grep "302"'
if [ $? == 1 ]; then
@@ -412,17 +429,8 @@ function create_svc_lb() {
query_admin_target_port
query_cluster_target_port
- # Parse lb svc input values
- # Generate valid json
- ret=$(echo $lbSvcValues | sed "s/\:/\\\"\:\\\"/g" |
- sed "s/{/{\"/g" |
- sed "s/}/\"}/g" |
- sed "s/,/\",\"/g" |
- sed "s/}\",\"{/},{/g" |
- tr -d \(\))
-
cat <${scriptDir}/lbConfiguration.json
-${ret}
+${LB_SVC_VALUES}
EOF
array=$(jq -r '.[] | "\(.colName),\(.colTarget),\(.colPort)"' ${scriptDir}/lbConfiguration.json)
@@ -477,18 +485,6 @@ source ${scriptDir}/common.sh
source ${scriptDir}/utility.sh
source ${scriptDir}/createDnsRecord.sh
-enableInternalLB=$1
-enableCustomSSL=$2
-enableCustomDNSAlias=$3
-dnsRGName=$4
-dnsZoneName=$5
-dnsAdminLabel=$6
-dnszoneAdminT3ChannelLabel=$7
-dnsClusterLabel=$8
-dnszoneClusterT3ChannelLabel=$9
-lbSvcValues=${10}
-wlsDomainUID=${11}
-
adminConsoleEndpoint="null"
adminServerName=${constAdminServerName} # define in common.sh
adminServerT3Endpoint="null"
@@ -496,11 +492,11 @@ adminRemoteEndpoint="null"
clusterEndpoint="null"
clusterName=${constClusterName}
clusterT3Endpoint="null"
-svcAdminServer="${wlsDomainUID}-${adminServerName}"
-svcCluster="${wlsDomainUID}-cluster-${clusterName}"
-wlsDomainNS="${wlsDomainUID}-ns"
+svcAdminServer="${WLS_DOMAIN_UID}-${adminServerName}"
+svcCluster="${WLS_DOMAIN_UID}-cluster-${clusterName}"
+wlsDomainNS="${WLS_DOMAIN_UID}-ns"
-echo ${lbSvcValues}
+echo ${LB_SVC_VALUES}
create_svc_lb
diff --git a/weblogic-azure-aks/src/main/arm/scripts/createVMAndBuildImage.sh b/weblogic-azure-aks/src/main/arm/scripts/createVMAndBuildImage.sh
index 51aa393db..8f1820887 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/createVMAndBuildImage.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/createVMAndBuildImage.sh
@@ -1,65 +1,99 @@
# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+# env inputs:
+# URL_3RD_DATASOURCE
+# ORACLE_ACCOUNT_ENTITLED
-# read and from stdin
+echo "Script ${0} starts"
+
+# read from stdin
function read_sensitive_parameters_from_stdin() {
- read azureACRPassword ocrSSOPSW
+ read acrShibboleth
}
function cleanup_vm() {
+ echo "deleting vm resources..."
#Remove VM resources
az extension add --name resource-graph
# query vm id
vmId=$(az graph query -q "Resources \
| where type =~ 'microsoft.compute/virtualmachines' \
| where name=~ '${vmName}' \
-| where resourceGroup =~ '${currentResourceGroup}' \
-| project vmid = id" -o tsv)
+| where resourceGroup =~ '${CURRENT_RESOURCEGROUP_NAME}' \
+| project vmid = id" --query "data[0].vmid" -o tsv)
# query nic id
nicId=$(az graph query -q "Resources \
| where type =~ 'microsoft.compute/virtualmachines' \
| where name=~ '${vmName}' \
-| where resourceGroup =~ '${currentResourceGroup}' \
+| where resourceGroup =~ '${CURRENT_RESOURCEGROUP_NAME}' \
| extend nics=array_length(properties.networkProfile.networkInterfaces) \
| mv-expand nic=properties.networkProfile.networkInterfaces \
| where nics == 1 or nic.properties.primary =~ 'true' or isempty(nic) \
-| project nicId = tostring(nic.id)" -o tsv)
-
- # query ip id
- ipId=$(az graph query -q "Resources \
-| where type =~ 'microsoft.network/networkinterfaces' \
-| where id=~ '${nicId}' \
-| extend ipConfigsCount=array_length(properties.ipConfigurations) \
-| mv-expand ipconfig=properties.ipConfigurations \
-| where ipConfigsCount == 1 or ipconfig.properties.primary =~ 'true' \
-| project publicIpId = tostring(ipconfig.properties.publicIPAddress.id)" -o tsv)
+| project nicId = tostring(nic.id)" --query "data[0].nicId" -o tsv)
# query os disk id
osDiskId=$(az graph query -q "Resources \
| where type =~ 'microsoft.compute/virtualmachines' \
| where name=~ '${vmName}' \
-| where resourceGroup =~ '${currentResourceGroup}' \
-| project osDiskId = tostring(properties.storageProfile.osDisk.managedDisk.id)" -o tsv)
+| where resourceGroup =~ '${CURRENT_RESOURCEGROUP_NAME}' \
+| project osDiskId = tostring(properties.storageProfile.osDisk.managedDisk.id)" --query "data[0].osDiskId" -o tsv)
# query vnet id
vnetId=$(az graph query -q "Resources \
| where type =~ 'Microsoft.Network/virtualNetworks' \
| where name=~ '${vmName}VNET' \
-| where resourceGroup =~ '${currentResourceGroup}' \
-| project vNetId = id" -o tsv)
+| where resourceGroup =~ '${CURRENT_RESOURCEGROUP_NAME}' \
+| project vNetId = id" --query "data[0].vNetId" -o tsv)
# query nsg id
nsgId=$(az graph query -q "Resources \
| where type =~ 'Microsoft.Network/networkSecurityGroups' \
| where name=~ '${vmName}NSG' \
-| where resourceGroup =~ '${currentResourceGroup}' \
-| project nsgId = id" -o tsv)
+| where resourceGroup =~ '${CURRENT_RESOURCEGROUP_NAME}' \
+| project nsgId = id" --query "data[0].nsgId" -o tsv)
+
+ #query public ip id
+ publicIpId=$(az graph query -q "Resources \
+| where type =~ 'Microsoft.Network/publicIPAddresses' \
+| where name =~ '${vmName}PublicIP' \
+| where resourceGroup =~ '${CURRENT_RESOURCEGROUP_NAME}' \
+| project publicIpId = id" --query "data[0].publicIpId" -o tsv)
# Delete VM NIC IP VNET NSG resoruces
- vmResourceIdS=$(echo ${vmId} ${nicId} ${ipId} ${osDiskId} ${vnetId} ${nsgId})
- echo ${vmResourceIdS}
- az resource delete --verbose --ids ${vmResourceIdS}
+ echo "deleting vm ${vmId}"
+ az vm delete --ids $vmId --yes
+ echo "deleting nic ${nicId}"
+ az network nic delete --ids ${nicId}
+ echo "deleting public ip ${publicIpId}"
+ az network public-ip delete --ids $publicIpId
+ echo "deleting disk ${osDiskId}"
+ az disk delete --yes --ids ${osDiskId}
+ echo "deleting vnet ${vnetId}"
+ az network vnet delete --ids ${vnetId}
+ echo "deleting nsg ${nsgId}"
+ az network nsg delete --ids ${nsgId}
+}
+
+# generate image full path based on the oracle account
+function get_ocr_image_full_path() {
+ local ocrImageFullPath="${ocrLoginServer}/${ocrGaImagePath}:${WLS_IMAGE_TAG}"
+
+ if [[ "${ORACLE_ACCOUNT_ENTITLED,,}" == "true" ]]; then
+
+ # download the ga cpu image mapping file.
+ local cpuImagesListFile=weblogic_cpu_images.json
+ curl -L ${gitUrl4CpuImages} --retry ${retryMaxAttempt} -o ${cpuImagesListFile}
+ local cpuTag=$(cat ${cpuImagesListFile} | jq ".items[] | select(.gaTag==\"${WLS_IMAGE_TAG}\") | .cpuTag" | tr -d "\"")
+ # if we can not find a matched image, keep the tag name the same as GA tag.
+ if [[ "${cpuTag}" == "" || "${cpuTag,,}" == "null" ]]; then
+ cpuTag=${WLS_IMAGE_TAG}
+ fi
+
+ ocrImageFullPath="${ocrLoginServer}/${ocrCpuImagePath}:${cpuTag}"
+ fi
+
+ wlsImagePath=${ocrImageFullPath}
}
# Build docker image
@@ -71,55 +105,83 @@ function build_docker_image() {
# Create vm to build docker image
vmName="VM-UBUNTU-WLS-AKS-$(date +%s)"
+ # az vm image list --publisher Canonical --offer 0001-com-ubuntu-server-focal --all -o table
+ ubuntuImage="Canonical:ubuntu-24_04-lts:server:latest"
+
+ if [[ "${CPU_PLATFORM}" == "${constARM64Platform}" ]]; then
+ ubuntuImage="Canonical:ubuntu-24_04-lts:server-arm64:latest"
+ fi
+
+ # query AKS vm size
+ # use the same VM size to create the Ubuntu machine, make sure the architecture is matched.
+ local vmSize=$(az aks show --name ${AKS_CLUSTER_NAME} --resource-group ${AKS_CLUSTER_RESOURCEGROUP_NAME} \
+ | jq '.agentPoolProfiles[] | select(.name=="'${AKS_NODE_POOL_NAME}'") | .vmSize' \
+ | tr -d "\"")
+
+ # if vmSize is empty or null, exit
+ if [[ "${vmSize}" == "" || "${vmSize}" == "null" ]]; then
+ echo_stderr "Failed to obtain VM size of AKS ${AKS_CLUSTER_NAME} in ${AKS_CLUSTER_RESOURCEGROUP_NAME}."
+ exit 1
+ fi
+
+ echo_stdout "TAG_VM: ${TAG_VM}"
+ export TAG_VM=$(echo "${TAG_VM}" \
+ | jq -r 'to_entries | map("\"" + .key + "\"=" + (if .value|type == "string" then "\"\(.value)\"" else "\(.value)" end)) | join(" ")')
+
+ publicIPName="${vmName}PublicIP"
+
# MICROSOFT_INTERNAL
# Specify tag 'SkipASMAzSecPack' to skip policy 'linuxazuresecuritypackautodeployiaas_1.6'
# Specify tag 'SkipNRMS*' to skip Microsoft internal NRMS policy, which causes vm-redeployed issue
az vm create \
- --resource-group ${currentResourceGroup} \
+ --resource-group ${CURRENT_RESOURCEGROUP_NAME} \
--name ${vmName} \
- --image "Canonical:UbuntuServer:18.04-LTS:latest" \
+ --image "${ubuntuImage}" \
--admin-username azureuser \
--generate-ssh-keys \
--nsg-rule NONE \
--enable-agent true \
--vnet-name ${vmName}VNET \
--enable-auto-update false \
- --tags SkipASMAzSecPack=true SkipNRMSCorp=true SkipNRMSDatabricks=true SkipNRMSDB=true SkipNRMSHigh=true SkipNRMSMedium=true SkipNRMSRDPSSH=true SkipNRMSSAW=true SkipNRMSMgmt=true --verbose
-
- wlsImagePath="${ocrLoginServer}/middleware/weblogic:${wlsImageTag}"
+ --public-ip-address ${publicIPName} \
+ --size ${vmSize} \
+ --tags ${TAG_VM} SkipASMAzSecPack=true SkipNRMSCorp=true SkipNRMSDatabricks=true SkipNRMSDB=true SkipNRMSHigh=true SkipNRMSMedium=true SkipNRMSRDPSSH=true SkipNRMSSAW=true SkipNRMSMgmt=true --verbose
+
+ if [[ "${USE_ORACLE_IMAGE,,}" == "${constTrue}" ]]; then
+ get_ocr_image_full_path
+ else
+ wlsImagePath="${USER_PROVIDED_IMAGE_PATH}"
+ fi
+
+ echo_stdout "wlsImagePath: ${wlsImagePath}"
+ URL_3RD_DATASOURCE=$(echo $URL_3RD_DATASOURCE | tr -d "\"") # remove " from the string
+ URL_3RD_DATASOURCE=$(echo $URL_3RD_DATASOURCE | base64 -w0)
+ # Tag for VM extension is not supported yet, see https://github.com/Azure/azure-cli/issues/14341
az vm extension set --name CustomScript \
- --extension-instance-name wls-image-script \
- --resource-group ${currentResourceGroup} \
- --vm-name ${vmName} \
- --publisher Microsoft.Azure.Extensions \
- --version 2.0 \
- --settings "{ \"fileUris\": [\"${scriptURL}model.properties\",\"${scriptURL}genImageModel.sh\",\"${scriptURL}buildWLSDockerImage.sh\",\"${scriptURL}common.sh\"]}" \
- --protected-settings "{\"commandToExecute\":\"echo ${azureACRPassword} ${ocrSSOPSW} | bash buildWLSDockerImage.sh ${wlsImagePath} ${azureACRServer} ${azureACRUserName} ${newImageTag} \\\"${appPackageUrls}\\\" ${ocrSSOUser} ${wlsClusterSize} ${enableCustomSSL} ${enableAdminT3Tunneling} ${enableClusterT3Tunneling} \"}"
+ --extension-instance-name wls-image-script \
+ --resource-group ${CURRENT_RESOURCEGROUP_NAME} \
+ --vm-name ${vmName} \
+ --publisher Microsoft.Azure.Extensions \
+ --version 2.0 \
+ --settings "{ \"fileUris\": [\"${SCRIPT_LOCATION}model.properties\",\"${SCRIPT_LOCATION}genImageModel.sh\",\"${SCRIPT_LOCATION}buildWLSDockerImage.sh\",\"${SCRIPT_LOCATION}common.sh\"]}" \
+ --protected-settings "{\"commandToExecute\":\"echo ${acrShibboleth} ${ORACLE_ACCOUNT_SHIBBOLETH} | bash buildWLSDockerImage.sh ${wlsImagePath} ${acrLoginServer} ${acrUser} ${newImageTag} ${WLS_APP_PACKAGE_URLS} ${ORACLE_ACCOUNT_NAME} ${WLS_CLUSTER_SIZE} ${ENABLE_CUSTOM_SSL} ${ENABLE_ADMIN_CUSTOM_T3} ${ENABLE_CLUSTER_CUSTOM_T3} ${USE_ORACLE_IMAGE} ${URL_3RD_DATASOURCE} ${ENABLE_SHIBBOLETHLESS_DB_CONNECTION} ${DB_TYPE} ${CPU_PLATFORM} \"}"
cleanup_vm
}
# Shell Global settings
-set -e #Exit immediately if a command exits with a non-zero status.
+set -Eeo pipefail #Exit immediately if a command exits with a non-zero status.
# Main script
export script="${BASH_SOURCE[0]}"
export scriptDir="$(cd "$(dirname "${script}")" && pwd)"
source ${scriptDir}/common.sh
+source ${scriptDir}/utility.sh
-export currentResourceGroup=$1
-export wlsImageTag=$2
-export azureACRServer=$3
-export azureACRUserName=$4
-export newImageTag=$5
-export appPackageUrls=$6
-export ocrSSOUser=$7
-export wlsClusterSize=$8
-export enableCustomSSL=$9
-export scriptURL=${10}
-export enableAdminT3Tunneling=${11}
-export enableClusterT3Tunneling=${12}
+export newImageTag=$1
+export acrLoginServer=$2
+export acrUser=$3
read_sensitive_parameters_from_stdin
diff --git a/weblogic-azure-aks/src/main/arm/scripts/dbUtility.sh b/weblogic-azure-aks/src/main/arm/scripts/dbUtility.sh
index 9bc9c86ff..23a65d7b8 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/dbUtility.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/dbUtility.sh
@@ -3,20 +3,25 @@
echo "Script ${0} starts"
-# read from stdin
-function read_sensitive_parameters_from_stdin() {
- read dbPassword
-}
-
function generate_ds_model() {
databaseDriver=${driverOracle}
databaseTestTableName=${testTableOracle}
- if [[ "${databaseType}" == "${dbTypePostgre}" ]]; then
+ if [[ "${DATABASE_TYPE}" == "${dbTypePostgre}" ]]; then
databaseDriver=${driverPostgre}
databaseTestTableName=${testTablePostgre}
- elif [[ "${databaseType}" == "${dbTypeSQLServer}" ]]; then
+ elif [[ "${DATABASE_TYPE}" == "${dbTypeSQLServer}" ]]; then
databaseDriver=${driverSQLServer}
databaseTestTableName=${testTableSQLServer}
+ elif [[ "${DATABASE_TYPE}" == "${dbTypeMySQL}" ]]; then
+ databaseDriver=${driverMySQL}
+ databaseTestTableName=${testTableMySQL}
+
+ if [[ "${ENABLE_SHIBBOLETHLESS_CONNECTION,,}" == "true" ]]; then
+ databaseDriver=${driverMySQLCj}
+ fi
+ elif [[ "${DATABASE_TYPE}" == "${dbTypeOthers}" ]]; then
+ databaseDriver=${DB_DRIVER_NAME}
+ databaseTestTableName=${TEST_TABLE_NAME}
fi
echo "generate data source model file"
@@ -24,7 +29,7 @@ function generate_ds_model() {
dsModelFilePath=$scriptDir/${dbSecretName}.yaml
bash $scriptDir/genDatasourceModel.sh \
${dsModelFilePath} \
- "${jdbcDataSourceName}" \
+ "${JDBC_DATASOURCE_NAME}" \
"${clusterName}" \
"${databaseDriver}" \
"${databaseTestTableName}" \
@@ -68,11 +73,11 @@ function export_models_and_delete_configmap() {
function cleanup_secret_and_model() {
echo "check if the datasource secret exists"
- jndiLabel=${jdbcDataSourceName//\//\_}
+ jndiLabel=${JDBC_DATASOURCE_NAME//\//\_}
secretLen=$(kubectl get secret -n ${domainNamespace} -l datasource.JNDI="${jndiLabel}" -o json |
jq '.items | length')
if [ ${secretLen} -ge 1 ]; then
- echo "secret for ${jdbcDataSourceName} exists"
+ echo "secret for ${JDBC_DATASOURCE_NAME} exists"
# delete the secrets
index=0
while [ $index -lt ${secretLen} ]; do
@@ -93,16 +98,16 @@ function cleanup_secret_and_model() {
function create_datasource_secret() {
cleanup_secret_and_model
- echo "create/update secret ${dbSecretName} for ${jdbcDataSourceName}"
+ echo "create/update secret ${dbSecretName} for ${JDBC_DATASOURCE_NAME}"
kubectl -n ${domainNamespace} create secret generic \
${dbSecretName} \
- --from-literal=password="${dbPassword}" \
- --from-literal=url="${dsConnectionURL}" \
- --from-literal=user="${dbUser}"
+ --from-literal=password="${DB_SHIBBOLETH}" \
+ --from-literal=url="${DB_CONNECTION_STRING}" \
+ --from-literal=user="${DB_USER}"
kubectl -n sample-domain1-ns label secret \
${dbSecretName} \
- weblogic.domainUID=${wlsDomainUID} \
+ weblogic.domainUID=${WLS_DOMAIN_UID} \
datasource.JNDI="${jndiLabel}"
}
@@ -118,7 +123,7 @@ function update_configmap() {
kubectl -n ${domainNamespace} create configmap ${wlsConfigmapName} \
--from-file=${modelFilePath}
kubectl -n ${domainNamespace} label configmap ${wlsConfigmapName} \
- weblogic.domainUID=${wlsDomainUID}
+ weblogic.domainUID=${WLS_DOMAIN_UID}
}
function delete_model_and_secret() {
@@ -132,36 +137,36 @@ function delete_model_and_secret() {
kubectl -n ${domainNamespace} create configmap ${wlsConfigmapName} \
--from-file=${modelFilePath}
kubectl -n ${domainNamespace} label configmap ${wlsConfigmapName} \
- weblogic.domainUID=${wlsDomainUID}
+ weblogic.domainUID=${WLS_DOMAIN_UID}
}
# Main script
+set -Eo pipefail
+
export script="${BASH_SOURCE[0]}"
export scriptDir="$(cd "$(dirname "${script}")" && pwd)"
-export databaseType=$1
-export dbUser=$2
-export dsConnectionURL=$3
-export jdbcDataSourceName=$4
-export wlsDomainUID=$5
-export dbSecretName=$6
-export operationType=$7
+export dbSecretName=$1
+export operationType=$2
-export domainNamespace=${wlsDomainUID}-ns
+export domainNamespace=${WLS_DOMAIN_UID}-ns
export clusterName="cluster-1"
export dbTypeOracle="oracle"
export dbTypePostgre="postgresql"
export dbTypeSQLServer="sqlserver"
+export dbTypeMySQL='mysql'
+export dbTypeOthers="otherdb"
export driverOracle="oracle.jdbc.OracleDriver"
export driverPostgre="org.postgresql.Driver"
export driverSQLServer="com.microsoft.sqlserver.jdbc.SQLServerDriver"
+export driverMySQL="com.mysql.jdbc.Driver"
+export driverMySQLCj="com.mysql.cj.jdbc.Driver"
export optTypeDelete='delete'
export testTableOracle="SQL ISVALID"
export testTablePostgre="SQL SELECT 1"
export testTableSQLServer="SQL SELECT 1"
-export wlsConfigmapName="${wlsDomainUID}-wdt-config-map"
-
-read_sensitive_parameters_from_stdin
+export testTableMySQL="SQL SELECT 1"
+export wlsConfigmapName="${WLS_DOMAIN_UID}-wdt-config-map"
if [[ "${operationType}" == "${optTypeDelete}" ]]; then
delete_model_and_secret
diff --git a/weblogic-azure-aks/src/main/arm/scripts/genDatasourceModel.sh b/weblogic-azure-aks/src/main/arm/scripts/genDatasourceModel.sh
index 19c9543e7..ec7e5ad0e 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/genDatasourceModel.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/genDatasourceModel.sh
@@ -25,7 +25,7 @@ resources:
JNDIName: [
${jndiName}
]
- GlobalTransactionsProtocol: TwoPhaseCommit
+ GlobalTransactionsProtocol: ${GLOBAL_TRANSATION_PROTOCOL}
JDBCDriverParams:
DriverName: ${driver}
URL: '@@SECRET:${secretName}:url@@'
diff --git a/weblogic-azure-aks/src/main/arm/scripts/genDomainConfig.sh b/weblogic-azure-aks/src/main/arm/scripts/genDomainConfig.sh
index d4072b1d4..5636f9a05 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/genDomainConfig.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/genDomainConfig.sh
@@ -5,24 +5,25 @@ export script="${BASH_SOURCE[0]}"
export scriptDir="$(cd "$(dirname "${script}")" && pwd)"
export filePath=$1
-export replicas=$2
-export wlsCPU=$3
-export wlsDomainUID=$4
-export wlsDomainName=$5
-export wlsImagePath=$6
-export wlsMemory=$7
-export wlsManagedPrefix=$8
-export enableSSL=${9}
-export enablePV=${10}
-export enableAdminT3Tunneling=${11}
-export enableClusterT3Tunneling=${12}
-export t3AdminPort=${13}
-export t3ClusterPort=${14}
-export clusterName=${15}
-export javaOptions=${16}
-
-export adminServiceUrl="${wlsDomainUID}-admin-server.${wlsDomainUID}-ns.svc.cluster.local"
-export clusterServiceUrl="${wlsDomainUID}-cluster-${clusterName}.${wlsDomainUID}-ns.svc.cluster.local"
+export wlsImagePath=$2
+export javaOptions=$3
+
+export adminServiceUrl="${WLS_DOMAIN_UID}-admin-server.${WLS_DOMAIN_UID}-ns.svc.cluster.local"
+export clusterServiceUrl="${WLS_DOMAIN_UID}-cluster-${constClusterName}.${WLS_DOMAIN_UID}-ns.svc.cluster.local"
+
+# set classpath
+preClassPath=""
+classPath="/u01/domains/${WLS_DOMAIN_UID}/wlsdeploy/${externalJDBCLibrariesDirectoryName}/*"
+
+if [[ "${DB_TYPE}" == "mysql" ]]; then
+ preClassPath="/u01/domains/${WLS_DOMAIN_UID}/wlsdeploy/${constPreclassDirectoryName}/*:"
+fi
+
+if [[ "${ENABLE_SHIBBOLETHLESS_DB_CONNECTION,,}" == "true" ]] && [[ "${DB_TYPE}" == "mysql" || "${DB_TYPE}" == "postgresql" ]]; then
+ # append jackson libraries to pre-classpath to upgrade existing libs in GA images
+ preClassPath="${preClassPath}/u01/domains/${WLS_DOMAIN_UID}/wlsdeploy/classpathLibraries/jackson/*"
+ classPath="${classPath}:/u01/domains/${WLS_DOMAIN_UID}/wlsdeploy/classpathLibraries/azureLibraries/*"
+fi
cat <$filePath
# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
@@ -32,13 +33,13 @@ cat <$filePath
# in https://github.com/oracle/weblogic-kubernetes-operator.
# This is an example of how to define a Domain resource.
#
-apiVersion: "weblogic.oracle/v8"
+apiVersion: "weblogic.oracle/v9"
kind: Domain
metadata:
- name: "${wlsDomainUID}"
- namespace: "${wlsDomainUID}-ns"
+ name: "${WLS_DOMAIN_UID}"
+ namespace: "${WLS_DOMAIN_UID}-ns"
labels:
- weblogic.domainUID: "${wlsDomainUID}"
+ weblogic.domainUID: "${WLS_DOMAIN_UID}"
spec:
# Set to 'FromModel' to indicate 'Model in Image'.
@@ -46,9 +47,9 @@ spec:
# The WebLogic Domain Home, this must be a location within
# the image for 'Model in Image' domains.
- domainHome: /u01/domains/${wlsDomainUID}
+ domainHome: /u01/domains/${WLS_DOMAIN_UID}
- # The WebLogic Server Docker image that the Operator uses to start the domain
+ # The WebLogic Server image that the Operator uses to start the domain
image: "${wlsImagePath}"
# Defaults to "Always" if image tag (version) is ':latest'
@@ -61,7 +62,7 @@ spec:
# Identify which Secret contains the WebLogic Admin credentials,
# the secret must contain 'username' and 'password' fields.
webLogicCredentialsSecret:
- name: "${wlsDomainUID}-weblogic-credentials"
+ name: "${WLS_DOMAIN_UID}-weblogic-credentials"
# Whether to include the WebLogic Server stdout in the pod's stdout, default is true
includeServerOutInPodLog: true
@@ -71,88 +72,102 @@ spec:
# The location for domain log, server logs, server out, introspector out, and Node Manager log files
# see also 'logHomeEnabled', 'volumes', and 'volumeMounts'.
- #logHome: /shared/logs/${wlsDomainUID}
+ #logHome: /shared/logs/${WLS_DOMAIN_UID}
# Set which WebLogic Servers the Operator will start
- # - "NEVER" will not start any server in the domain
- # - "ADMIN_ONLY" will start up only the administration server (no managed servers will be started)
- # - "IF_NEEDED" will start all non-clustered servers, including the administration server, and clustered servers up to their replica count.
- serverStartPolicy: "IF_NEEDED"
+ # - "Never" will not start any server in the domain
+ # - "AdminOnly" will start up only the administration server (no managed servers will be started)
+ # - "IfNeeded" will start all non-clustered servers, including the administration server, and clustered servers up to their replica count.
+ serverStartPolicy: IfNeeded
# Settings for all server pods in the domain including the introspector job pod
serverPod:
+ # Tune for small VM sizes
+ # https://oracle.github.io/weblogic-kubernetes-operator/managing-domains/domain-lifecycle/liveness-readiness-probe-customization/
+ livenessProbe:
+ periodSeconds: ${constLivenessProbePeriodSeconds}
+ timeoutSeconds: ${constLivenessProbeTimeoutSeconds}
+ failureThreshold: ${constLivenessProbeFailureThreshold}
+ readinessProbe:
+ periodSeconds: ${constReadinessProbeProbePeriodSeconds}
+ timeoutSeconds: ${constReadinessProbeTimeoutSeconds}
+ failureThreshold: ${constReadinessProbeFailureThreshold}
# Optional new or overridden environment variables for the domain's pods
# - This sample uses CUSTOM_DOMAIN_NAME in its image model file
# to set the Weblogic domain name
env:
- name: CUSTOM_DOMAIN_NAME
- value: "${wlsDomainName}"
+ value: "${WLS_DOMAIN_NAME}"
- name: JAVA_OPTIONS
- value: "-Dweblogic.StdoutDebugEnabled=false ${javaOptions}"
+ value: "${constDefaultJavaOptions} ${javaOptions}"
- name: USER_MEM_ARGS
- value: "-Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx512m -XX:MinRAMPercentage=25.0 -XX:MaxRAMPercentage=50.0 "
+ value: "${constDefaultJVMArgs}"
- name: MANAGED_SERVER_PREFIX
- value: "${wlsManagedPrefix}"
+ value: "${WLS_MANAGED_SERVER_PREFIX}"
+ - name: PRE_CLASSPATH
+ value: "${preClassPath}"
+ - name: CLASSPATH
+ value: "${classPath}"
EOF
-if [[ "${enableSSL,,}" == "true" ]]; then
- cat <>$filePath
+if [[ "${ENABLE_CUSTOM_SSL,,}" == "true" ]]; then
+ cat <>$filePath
- name: SSL_IDENTITY_PRIVATE_KEY_ALIAS
valueFrom:
secretKeyRef:
key: sslidentitykeyalias
- name: ${wlsDomainUID}-weblogic-ssl-credentials
+ name: ${WLS_DOMAIN_UID}-weblogic-ssl-credentials
- name: SSL_IDENTITY_PRIVATE_KEY_PSW
valueFrom:
secretKeyRef:
key: sslidentitykeypassword
- name: ${wlsDomainUID}-weblogic-ssl-credentials
+ name: ${WLS_DOMAIN_UID}-weblogic-ssl-credentials
- name: SSL_IDENTITY_PRIVATE_KEYSTORE_PATH
valueFrom:
secretKeyRef:
key: sslidentitystorepath
- name: ${wlsDomainUID}-weblogic-ssl-credentials
+ name: ${WLS_DOMAIN_UID}-weblogic-ssl-credentials
- name: SSL_IDENTITY_PRIVATE_KEYSTORE_TYPE
valueFrom:
secretKeyRef:
key: sslidentitystoretype
- name: ${wlsDomainUID}-weblogic-ssl-credentials
+ name: ${WLS_DOMAIN_UID}-weblogic-ssl-credentials
- name: SSL_IDENTITY_PRIVATE_KEYSTORE_PSW
valueFrom:
secretKeyRef:
key: sslidentitystorepassword
- name: ${wlsDomainUID}-weblogic-ssl-credentials
+ name: ${WLS_DOMAIN_UID}-weblogic-ssl-credentials
- name: SSL_TRUST_KEYSTORE_PATH
valueFrom:
secretKeyRef:
key: ssltruststorepath
- name: ${wlsDomainUID}-weblogic-ssl-credentials
+ name: ${WLS_DOMAIN_UID}-weblogic-ssl-credentials
- name: SSL_TRUST_KEYSTORE_TYPE
valueFrom:
secretKeyRef:
key: ssltruststoretype
- name: ${wlsDomainUID}-weblogic-ssl-credentials
+ name: ${WLS_DOMAIN_UID}-weblogic-ssl-credentials
- name: SSL_TRUST_KEYSTORE_PSW
valueFrom:
secretKeyRef:
key: ssltruststorepassword
- name: ${wlsDomainUID}-weblogic-ssl-credentials
+ name: ${WLS_DOMAIN_UID}-weblogic-ssl-credentials
EOF
- fi
+fi
-if [[ "${enableAdminT3Tunneling,,}" == "true" ]]; then
+if [[ "${ENABLE_ADMIN_CUSTOM_T3,,}" == "true" ]]; then
cat <>$filePath
- name: T3_TUNNELING_ADMIN_PORT
- value: "${t3AdminPort}"
+ value: "${WLS_T3_ADMIN_PORT}"
- name: T3_TUNNELING_ADMIN_ADDRESS
value: "${adminServiceUrl}"
EOF
fi
-if [[ "${enableClusterT3Tunneling,,}" == "true" ]]; then
+if [[ "${ENABLE_CLUSTER_CUSTOM_T3,,}" == "true" ]]; then
cat <>$filePath
- name: T3_TUNNELING_CLUSTER_PORT
- value: "${t3ClusterPort}"
+ value: "${WLS_T3_CLUSTER_PORT}"
- name: T3_TUNNELING_CLUSTER_ADDRESS
value: "${clusterServiceUrl}"
EOF
@@ -162,30 +177,34 @@ fi
cat <>$filePath
resources:
requests:
- cpu: "${wlsCPU}"
- memory: "${wlsMemory}"
+ cpu: "${WLS_RESOURCE_REQUEST_CPU}"
+ memory: "${WLS_RESOURCE_REQUEST_MEMORY}"
EOF
-if [[ "${enablePV,,}" == "true" ]]; then
+# Enable DB pod identity; all pod-identity selectors use "db-pod-identity".
+if [[ "${ENABLE_SHIBBOLETHLESS_DB_CONNECTION,,}" == "true" ]]; then
+ cat <>$filePath
+ labels:
+ aadpodidbinding: "${constDbPodIdentitySelector}"
+EOF
+fi
+
+if [[ "${ENABLE_PV,,}" == "true" ]]; then
cat <>$filePath
# Optional volumes and mounts for the domain's pods. See also 'logHome'.
volumes:
- - name: ${wlsDomainUID}-pv-azurefile
+ - name: ${WLS_DOMAIN_UID}-pv-azurefile
persistentVolumeClaim:
- claimName: ${wlsDomainUID}-pvc-azurefile
+ claimName: ${WLS_DOMAIN_UID}-pvc-azurefile
volumeMounts:
- mountPath: /shared
- name: ${wlsDomainUID}-pv-azurefile
+ name: ${WLS_DOMAIN_UID}-pv-azurefile
EOF
fi
cat <>$filePath
# The desired behavior for starting the domain's administration server.
adminServer:
- # The serverStartState legal values are "RUNNING" or "ADMIN"
- # "RUNNING" means the listed server will be started up to "RUNNING" mode
- # "ADMIN" means the listed server will be start up to "ADMIN" mode
- serverStartState: "RUNNING"
# Setup a Kubernetes node port for the administration server default channel
#adminService:
# channels:
@@ -195,27 +214,9 @@ cat <>$filePath
# The number of admin servers to start for unlisted clusters
replicas: 1
- # The desired behavior for starting a specific cluster's member servers
+ # The name of each Cluster resource
clusters:
- - clusterName: cluster-1
- serverStartState: "RUNNING"
- serverPod:
- # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not
- # already members of the same cluster.
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: "weblogic.clusterName"
- operator: In
- values:
- - \$(CLUSTER_NAME)
- topologyKey: "kubernetes.io/hostname"
- # The number of managed servers to start for unlisted clusters
- replicas: ${replicas}
+ - name: ${WLS_DOMAIN_UID}-cluster-1
# Change the restartVersion to force the introspector job to rerun
# and apply any new model configuration, to also force a subsequent
@@ -230,13 +231,31 @@ cat <>$filePath
domainType: "WLS"
# Optional configmap for additional models and variable files
- #configMap: ${wlsDomainUID}-wdt-config-map
+ #configMap: ${WLS_DOMAIN_UID}-wdt-config-map
# All 'FromModel' domains require a runtimeEncryptionSecret with a 'password' field
- runtimeEncryptionSecret: "${wlsDomainUID}-runtime-encryption-secret"
+ runtimeEncryptionSecret: "${WLS_DOMAIN_UID}-runtime-encryption-secret"
# Secrets that are referenced by model yaml macros
# (the model yaml in the optional configMap or in the image)
#secrets:
- #- ${wlsDomainUID}-datasource-secret
-EOF
\ No newline at end of file
+ #- ${WLS_DOMAIN_UID}-datasource-secret
+
+---
+
+apiVersion: "weblogic.oracle/v1"
+kind: Cluster
+metadata:
+ name: ${WLS_DOMAIN_UID}-cluster-1
+ # Update this with the namespace your domain will run in:
+ namespace: ${WLS_DOMAIN_UID}-ns
+ labels:
+ # Update this with the domainUID of your domain:
+ weblogic.domainUID: ${WLS_DOMAIN_UID}
+spec:
+ # This must match a cluster name that is specified in the WebLogic configuration
+ clusterName: cluster-1
+ # The number of managed servers to start for this cluster
+ replicas: 2
+
+EOF
diff --git a/weblogic-azure-aks/src/main/arm/scripts/genImageModel.sh b/weblogic-azure-aks/src/main/arm/scripts/genImageModel.sh
index 5daf9dad9..106bd86a3 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/genImageModel.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/genImageModel.sh
@@ -33,7 +33,6 @@ domainInfo:
AdminUserName: "@@SECRET:__weblogic-credentials__:username@@"
AdminPassword: "@@SECRET:__weblogic-credentials__:password@@"
ServerStartMode: "prod"
- domainLibraries: [ 'wlsdeploy/domainLibraries/postgresql-42.2.8.jar', 'wlsdeploy/domainLibraries/mssql-jdbc-7.4.1.jre8.jar']
topology:
Name: "@@ENV:CUSTOM_DOMAIN_NAME@@"
@@ -136,7 +135,9 @@ cat <>${filePath}
SecurityConfiguration:
NodeManagerUsername: "@@SECRET:__weblogic-credentials__:username@@"
NodeManagerPasswordEncrypted: "@@SECRET:__weblogic-credentials__:password@@"
-
+ SecureMode:
+ SecureModeEnabled: false
+
resources:
SelfTuning:
MinThreadsConstraint:
@@ -169,6 +170,7 @@ EOF
index=1
for item in $appUrlArray; do
echo ${item}
+ item=$(echo $item | tr -d "\"") # strip surrounding double quotes
# e.g. https://wlsaksapp.blob.core.windows.net/japps/testwebapp.war?sp=r&se=2021-04-29T15:12:38Z&sv=2020-02-10&sr=b&sig=7grL4qP%2BcJ%2BLfDJgHXiDeQ2ZvlWosRLRQ1ciLk0Kl7M%3D
urlWithoutQueryString="${item%\?*}"
echo $urlWithoutQueryString
@@ -183,7 +185,7 @@ EOF
continue
fi
- curl -m ${curlMaxTime} -fL "$item" -o ${scriptDir}/model-images/wlsdeploy/applications/${fileName}
+ curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fL "$item" -o ${scriptDir}/model-images/wlsdeploy/applications/${fileName}
if [ $? -ne 0 ];then
echo "Failed to download $item"
exit 1
@@ -198,4 +200,4 @@ EOF
done
# print model
-cat ${filePath}
\ No newline at end of file
+cat ${filePath}
diff --git a/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/enableAgic.sh b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/enableAgic.sh
new file mode 100644
index 000000000..2660d9d2b
--- /dev/null
+++ b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/enableAgic.sh
@@ -0,0 +1,44 @@
+# Copyright (c) 2021, 2024, Oracle Corporation and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+function enable_aks_msi() {
+ local identityLength=$(az aks show -g ${AKS_CLUSTER_RG_NAME} -n ${AKS_CLUSTER_NAME} | jq '.identity | length')
+ echo "identityLength ${identityLength}"
+
+ if [ $identityLength -lt 1 ]; then
+ echo "enable managed identity..."
+ # Your cluster is using service principal, and you are going to update the cluster to use system-assigned managed identity.
+ # After updating, your cluster's control plane and addon pods will switch to use managed identity, but kubelet will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.
+ az aks update -y -g ${AKS_CLUSTER_RG_NAME} -n ${AKS_CLUSTER_NAME} --enable-managed-identity
+
+ utility_validate_status "Enable managed identity for ${AKS_CLUSTER_NAME}."
+ fi
+}
+
+function install_azure_ingress() {
+ local agicEnabled=$(az aks show -n ${AKS_CLUSTER_NAME} -g ${AKS_CLUSTER_RG_NAME} |
+ jq '.addonProfiles.ingressApplicationGateway.enabled')
+ local agicGatewayId=""
+
+ if [[ "${agicEnabled,,}" == "true" ]]; then
+ agicGatewayId=$(az aks show -n ${AKS_CLUSTER_NAME} -g ${AKS_CLUSTER_RG_NAME} |
+ jq '.addonProfiles.ingressApplicationGateway.config.applicationGatewayId' |
+ tr -d "\"")
+ fi
+
+ local appgwId=$(az network application-gateway show \
+ -n ${APPGW_NAME} \
+ -g ${CURRENT_RG_NAME} -o tsv --query "id")
+
+ if [[ "${agicGatewayId}" != "${appgwId}" ]]; then
+ az aks enable-addons -n ${AKS_CLUSTER_NAME} -g ${AKS_CLUSTER_RG_NAME} --addons ingress-appgw --appgw-id $appgwId
+ utility_validate_status "Install app gateway ingress controller."
+ fi
+}
+
+# Main script
+set -Eo pipefail
+
+enable_aks_msi
+
+install_azure_ingress
diff --git a/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/enableHpa.sh b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/enableHpa.sh
new file mode 100644
index 000000000..36524bae5
--- /dev/null
+++ b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/enableHpa.sh
@@ -0,0 +1,68 @@
+# Copyright (c) 2024, Oracle Corporation and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+function get_cluster_uid(){
+ local clusterUid=$(kubectl get clusters -n ${WLS_NAMESPACE} -o=jsonpath='{.items[].metadata.name}')
+ utility_validate_status "Obtain cluster UID."
+ export WLS_CLUSTER_UID=${clusterUid}
+}
+
+function scaling_basedon_cpu(){
+ kubectl autoscale cluster ${WLS_CLUSTER_UID} \
+ --cpu-percent=${UTILIZATION_PERCENTAGE} \
+ --min=1 \
+ --max=${WLS_CLUSTER_SIZE} \
+ -n ${WLS_NAMESPACE}
+ utility_validate_status "Enable HPA based on CPU utilization."
+}
+
+function scaling_basedon_memory(){
+ cat <scaler-memory.yaml
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: ${WLS_CLUSTER_UID}
+ namespace: ${WLS_NAMESPACE}
+spec:
+ scaleTargetRef:
+ apiVersion: weblogic.oracle/v1
+ kind: Cluster
+ name: ${WLS_CLUSTER_UID}
+ minReplicas: 1
+ maxReplicas: ${WLS_CLUSTER_SIZE}
+ metrics:
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ averageUtilization: ${UTILIZATION_PERCENTAGE}
+ type: Utilization
+EOF
+
+ kubectl apply -f scaler-memory.yaml
+ utility_validate_status "Enable HPA based on memory utilization."
+}
+
+function check_kubernetes_metrics_server(){
+ # $?=1 if there is no running kms pod.
+ kubectl get pod -l k8s-app=metrics-server -n kube-system | grep "Running"
+ # exit if $?=1
+ utility_validate_status "There should be at least one pod of kubernetes metrics server running."
+}
+
+# Main script
+set -Eo pipefail
+
+install_kubectl
+
+connect_aks $AKS_CLUSTER_NAME $AKS_CLUSTER_RG_NAME
+
+get_cluster_uid
+
+check_kubernetes_metrics_server
+
+if [ "$HPA_SCALE_TYPE" == "cpu" ]; then
+ scaling_basedon_cpu
+elif [ "$HPA_SCALE_TYPE" == "memory" ]; then
+ scaling_basedon_memory
+fi
diff --git a/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/enablePrometheusMetrics.sh b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/enablePrometheusMetrics.sh
new file mode 100644
index 000000000..1d8917c8d
--- /dev/null
+++ b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/enablePrometheusMetrics.sh
@@ -0,0 +1,405 @@
+# Copyright (c) 2024, Oracle Corporation and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+#!/bin/bash
+
+function enable_promethues_metrics(){
+ # See https://learn.microsoft.com/en-us/azure/azure-monitor/containers/kubernetes-monitoring-enable?tabs=cli#enable-prometheus-and-grafana
+ az extension add --name k8s-extension && true
+
+ ### Use existing Azure Monitor workspace
+ az aks update --enable-azure-monitor-metrics \
+ --name ${AKS_CLUSTER_NAME} \
+ --resource-group ${AKS_CLUSTER_RG_NAME} \
+ --azure-monitor-workspace-resource-id "${AMA_WORKSPACE_ID}" \
+ --only-show-errors
+
+ utility_validate_status "Enable Prometheus Metrics."
+
+ az extension add --name aks-preview && true
+ az extension remove --name k8s-extension && true
+
+ #Verify that the DaemonSet was deployed properly on the Linux node pools
+ #https://learn.microsoft.com/en-us/azure/azure-monitor/containers/kubernetes-monitoring-enable?tabs=cli#managed-prometheus
+ kubectl get ds ama-metrics-node --namespace=kube-system
+ #if the deployment fails, $?=1.
+ utility_validate_status "Validate Prometheus metrics is enabled."
+}
+
+# https://learn.microsoft.com/en-us/azure/azure-monitor/containers/prometheus-metrics-scrape-configuration
+function deploy_customize_scraping(){
+ # https://learn.microsoft.com/en-us/azure/azure-monitor/containers/prometheus-metrics-scrape-configuration?tabs=CRDConfig%2CCRDScrapeConfig#basic-authentication
+ local wlsShibbolethBase64=$(echo -n "${WLS_ADMIN_SHIBBOLETH}" | base64)
+ cat <prometheus-config
+global:
+ scrape_interval: 30s
+scrape_configs:
+- job_name: '${WLS_DOMAIN_UID}'
+ kubernetes_sd_configs:
+ - role: pod
+ namespaces:
+ names: [${WLS_NAMESPACE}]
+ basic_auth:
+ username: ${WLS_ADMIN_USERNAME}
+ password_file: /etc/prometheus/certs/password1
+EOF
+
+ #validate the scrape config file
+ local podNamesinKubeSystem=$(kubectl get pods -l rsName=ama-metrics -n=kube-system -o json | jq -r '.items[].metadata.name')
+ mkdir promconfigvalidator
+ for podname in ${podNamesinKubeSystem}
+ do
+ kubectl cp -n=kube-system "${podname}":/opt/promconfigvalidator ./promconfigvalidator/promconfigvalidator
+ kubectl cp -n=kube-system "${podname}":/opt/microsoft/otelcollector/collector-config-template.yml ./promconfigvalidator/collector-config-template.yml
+ chmod 500 ./promconfigvalidator/promconfigvalidator
+ done
+
+ if [ ! -f "./promconfigvalidator/promconfigvalidator" ]; then
+ echo_stderr "Failed to download promconfigvalidator tool that is shipped inside the Azure Monitor metrics addon pod(s)."
+ exit 1
+ fi
+
+ ./promconfigvalidator/promconfigvalidator --config "./prometheus-config" --otelTemplate "./promconfigvalidator/collector-config-template.yml"
+ utility_validate_status "Validate prometheus-config using promconfigvalidator."
+
+ kubectl create configmap ama-metrics-prometheus-config --from-file=prometheus-config -n kube-system
+ utility_validate_status "Create ama-metrics-prometheus-config in kube-system namespace."
+}
+
+function get_wls_monitoring_exporter_image_url() {
+ local wlsToolingFamilyJsonFile=weblogic_tooling_family.json
+ local imageUrl="ghcr.io/oracle/weblogic-monitoring-exporter:2.1.9"
+
+ # download the json file that has well tested monitoring exporter image url from weblogic-azure repo.
+ curl -m ${curlMaxTime} --retry ${retryMaxAttempt} -fsL "${gitUrl4WLSToolingFamilyJsonFile}" -o ${wlsToolingFamilyJsonFile}
+ if [ $? -eq 0 ]; then
+ imageURL=$(cat ${wlsToolingFamilyJsonFile} | jq ".items[] | select(.key==\"WME\") | .imageURL" | tr -d "\"")
+ echo_stdout "well tested monitoring exporter image url: ${imageURL}"
+ fi
+
+ echo_stdout "Use monitoring exporter image: ${imageURL} "
+ export WME_IMAGE_URL=${imageURL:-${imageUrl}} # prefer the downloaded well-tested URL; fall back to the hard-coded default if the download failed
+}
+
+# https://github.com/oracle/weblogic-monitoring-exporter
+function deploy_webLogic_monitoring_exporter(){
+ local wlsVersion=$(kubectl -n ${WLS_NAMESPACE} get domain ${WLS_DOMAIN_UID} -o=jsonpath='{.spec.restartVersion}' | tr -d "\"")
+ wlsVersion=$((wlsVersion+1))
+
+ cat <patch-file.json
+[
+ {
+ "op": "replace",
+ "path": "/spec/restartVersion",
+ "value": "${wlsVersion}"
+ },
+ {
+ "op": "add",
+ "path": "/spec/monitoringExporter",
+ "value": {
+ "configuration": {
+ "domainQualifier": true,
+ "metricsNameSnakeCase": true,
+ "queries": [
+ {
+ "applicationRuntimes": {
+ "componentRuntimes": {
+ "key": "name",
+ "prefix": "webapp_config_",
+ "servlets": {
+ "key": "servletName",
+ "prefix": "weblogic_servlet_",
+ "values": [
+ "invocationTotalCount",
+ "reloadTotal",
+ "executionTimeAverage",
+ "poolMaxCapacity",
+ "executionTimeTotal",
+ "reloadTotalCount",
+ "executionTimeHigh",
+ "executionTimeLow"
+ ]
+ },
+ "type": "WebAppComponentRuntime",
+ "values": [
+ "deploymentState",
+ "contextRoot",
+ "sourceInfo",
+ "openSessionsHighCount",
+ "openSessionsCurrentCount",
+ "sessionsOpenedTotalCount",
+ "sessionCookieMaxAgeSecs",
+ "sessionInvalidationIntervalSecs",
+ "sessionTimeoutSecs",
+ "singleThreadedServletPoolSize",
+ "sessionIDLength",
+ "servletReloadCheckSecs",
+ "jSPPageCheckSecs"
+ ]
+ },
+ "workManagerRuntimes": {
+ "prefix": "workmanager_",
+ "key": "applicationName",
+ "values": [
+ "pendingRequests",
+ "completedRequests",
+ "stuckThreadCount"]
+ },
+ "key": "name",
+ "keyName": "app"
+ },
+ "JVMRuntime": {
+ "key": "name",
+ "values": [
+ "heapFreeCurrent",
+ "heapFreePercent",
+ "heapSizeCurrent",
+ "heapSizeMax",
+ "uptime",
+ "processCpuLoad"
+ ]
+ },
+ "key": "name",
+ "keyName": "server"
+ }
+ ]
+ },
+ "image": "${WME_IMAGE_URL}",
+ "port": 8080
+ }
+ }
+]
+EOF
+
+ kubectl -n ${WLS_NAMESPACE} patch domain ${WLS_DOMAIN_UID} \
+ --type=json \
+ --patch-file patch-file.json
+ utility_validate_status "Enable WebLogic Monitoring Exporter."
+
+ local timestampBeforePatchingDomain=$(date +%s)
+ local clusterName=$(kubectl get cluster -n ${WLS_NAMESPACE} -o json | jq -r '.items[0].metadata.name')
+ local replicas=$(kubectl -n ${WLS_NAMESPACE} get cluster ${clusterName} -o json \
+ | jq '. | .spec.replicas')
+
+ # wait for the restart completed.
+ utility_wait_for_pod_restarted \
+ ${timestampBeforePatchingDomain} \
+ ${replicas} \
+ ${WLS_DOMAIN_UID} \
+ ${checkPodStatusMaxAttemps} \
+ ${checkPodStatusInterval}
+}
+
+function wait_for_keda_ready(){
+ local ready=false
+ local attempt=0
+
+ while [[ "${ready}" == "false" && $attempt -le ${checkKedaMaxAttempt} ]]; do
+ echo_stdout "Check if KEDA is ready, attempt: ${attempt}."
+ ready=true
+
+ local podCount=$(kubectl get pods -n ${KEDA_NAMESPACE} -o json | jq -r '.items | length')
+ if [ $podCount -lt 3 ];then
+ ready=false
+ fi
+
+ local podnames=$(kubectl get pods -n ${KEDA_NAMESPACE} -o json | jq -r '.items[].metadata.name')
+ for podname in ${podnames}
+ do
+ kubectl get pod ${podname} -n ${KEDA_NAMESPACE} | grep "Running"
+
+ if [ $? -eq 1 ];then
+ ready=false
+ fi
+ done
+
+ attempt=$((attempt + 1))
+ sleep ${checkKedaInteval}
+ done
+
+ if [ ${attempt} -gt ${checkKedaMaxAttempt} ]; then
+ echo_stderr "Failed to enable KEDA."
+ exit 1
+ fi
+
+ echo_stderr "KEDA is running."
+}
+
+function get_keda_latest_version() {
+ local kedaVersion
+ kedaVersion=$(helm search repo kedacore/keda --versions | awk '/^kedacore\/keda/ {print $2; exit}')
+ export KEDA_VERSION="${kedaVersion}"
+ echo_stderr "Use latest KEDA. KEDA version: ${KEDA_VERSION}"
+}
+
+
+function get_keda_version() {
+ local versionJsonFileName="aks_tooling_well_tested_version.json"
+ local kedaWellTestedVersion
+
+ # Download the version JSON file
+ curl -L "${gitUrl4AksToolingWellTestedVersionJsonFile}" --retry "${retryMaxAttempt}" -o "${versionJsonFileName}"
+
+ # Extract KEDA version from JSON
+ kedaWellTestedVersion=$(jq -r '.items[] | select(.key == "keda") | .version' "${versionJsonFileName}")
+
+ # Check if version is available
+ if [ $? -ne 0 ]; then
+ get_keda_latest_version
+ return 0
+ fi
+
+ # Print KEDA well-tested version
+ echo_stderr "KEDA well-tested version: ${kedaWellTestedVersion}"
+
+ # Search for KEDA version in Helm repo
+ if ! helm search repo kedacore/keda --versions | grep -q "${kedaWellTestedVersion}"; then
+ get_keda_latest_version
+ return 0
+ fi
+
+ # Export KEDA version
+ export KEDA_VERSION="${kedaWellTestedVersion}"
+ echo_stderr "KEDA version: ${KEDA_VERSION}"
+}
+
+# https://learn.microsoft.com/en-us/azure/azure-monitor/containers/integrate-keda
+function enable_keda_addon() {
+ local oidcEnabled=$(az aks show --resource-group $AKS_CLUSTER_RG_NAME --name $AKS_CLUSTER_NAME --query oidcIssuerProfile.enabled)
+ local workloadIdentity=$(az aks show --resource-group $AKS_CLUSTER_RG_NAME --name $AKS_CLUSTER_NAME --query securityProfile.workloadIdentity)
+
+ if [[ "${oidcEnabled,,}" == "false" || -z "${workloadIdentity}" ]]; then
+ # mitigate https://github.com/Azure/azure-cli/issues/28649
+ pip install --upgrade azure-core
+ az aks update -g $AKS_CLUSTER_RG_NAME -n $AKS_CLUSTER_NAME --enable-workload-identity --enable-oidc-issuer
+ utility_validate_status "Enable OIDC and workload identity in AKS $AKS_CLUSTER_NAME."
+ fi
+
+ export OIDC_ISSUER_URL=$(az aks show -n $AKS_CLUSTER_NAME -g $AKS_CLUSTER_RG_NAME --query "oidcIssuerProfile.issuerUrl" -otsv)
+ export KEDA_UAMI_CLIENT_ID=$(az identity show --resource-group $CURRENT_RG_NAME --name $KEDA_UAMI_NAME --query 'clientId' -otsv)
+ local tenantId=$(az identity show --resource-group $CURRENT_RG_NAME --name $KEDA_UAMI_NAME --query 'tenantId' -otsv)
+
+ kubectl create namespace ${KEDA_NAMESPACE}
+
+ cat <kedascalersample.yaml
+apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: azure-managed-prometheus-trigger-auth
+ namespace: ${WLS_NAMESPACE}
+spec:
+ podIdentity:
+ provider: azure-workload
+ identityId: ${KEDA_UAMI_CLIENT_ID}
+---
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: azure-managed-prometheus-scaler
+ namespace: ${WLS_NAMESPACE}
+spec:
+ scaleTargetRef:
+ apiVersion: weblogic.oracle/v1
+ kind: Cluster
+ name: ${clusterName}
+ minReplicaCount: 1
+ maxReplicaCount: ${WLS_CLUSTER_SIZE}
+ triggers:
+ - type: prometheus
+ metadata:
+ serverAddress: ${kedaServerAddress}
+ metricName: webapp_config_open_sessions_high_count
+ query: sum(webapp_config_open_sessions_high_count{app=""}) # Note: query must return a vector/scalar single element response
+ threshold: '10'
+ activationThreshold: '1'
+ authenticationRef:
+ name: azure-managed-prometheus-trigger-auth
+EOF
+
+ local base64ofKedaScalerSample=$(cat ./kedascalersample.yaml | base64)
+ local result=$(jq -n -c \
+ --arg kedaScalerServerAddress "$kedaServerAddress" \
+ --arg base64ofKedaScalerSample "${base64ofKedaScalerSample}" \
+ '{kedaScalerServerAddress: $kedaScalerServerAddress, base64ofKedaScalerSample: $base64ofKedaScalerSample}')
+ echo "result is: $result"
+ echo $result >$AZ_SCRIPTS_OUTPUT_PATH
+}
+
+# TBD see if we can query some of the metrics
+
+# Main script
+set -Eo pipefail
+
+install_kubectl
+
+install_helm
+
+connect_aks $AKS_CLUSTER_NAME $AKS_CLUSTER_RG_NAME
+
+get_wls_monitoring_exporter_image_url
+
+deploy_webLogic_monitoring_exporter
+
+enable_promethues_metrics
+
+deploy_customize_scraping
+
+enable_keda_addon
+
+output
diff --git a/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/queryDomainConfigurations.sh b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/queryDomainConfigurations.sh
new file mode 100644
index 000000000..4719c0748
--- /dev/null
+++ b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/queryDomainConfigurations.sh
@@ -0,0 +1,81 @@
+# Copyright (c) 2021, 2024 Oracle Corporation and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+# This script runs on Azure Container Instance with Alpine Linux that Azure Deployment script creates.
+#
+# env inputs:
+# AKS_CLUSTER_NAME
+# AKS_CLUSTER_RESOURCEGROUP_NAME
+# WLS_CLUSTER_NAME
+# WLS_DOMAIN_UID
+
+# Main script
+wlsContainerName="weblogic-server"
+
+echo "install kubectl"
+az aks install-cli
+
+echo "Connect AKS"
+connect_aks $AKS_CLUSTER_NAME $AKS_CLUSTER_RESOURCEGROUP_NAME
+
+wlsDomainNS="${WLS_DOMAIN_UID}-ns"
+
+domainConfigurationYaml=/tmp/domain.yaml
+rm -f ${domainConfigurationYaml}
+kubectl get domain ${WLS_DOMAIN_UID} -n ${wlsDomainNS} -o yaml >${domainConfigurationYaml}
+
+podNum=$(kubectl -n ${wlsDomainNS} get pod -l weblogic.clusterName=${WLS_CLUSTER_NAME} -o json | jq '.items| length')
+if [ ${podNum} -le 0 ]; then
+    echo_stderr "Ensure your cluster has at least one pod."
+    exit 1
+fi
+
+podName=$(kubectl -n ${wlsDomainNS} get pod -l weblogic.clusterName=${WLS_CLUSTER_NAME} -o json \
+ | jq '.items[0] | .metadata.name' \
+ | tr -d "\"")
+
+echo "Copy model.yaml from /u01/wdt/models"
+targetModelYaml=/tmp/model.yaml
+rm -f ${targetModelYaml}
+kubectl cp -n ${wlsDomainNS} -c ${wlsContainerName} ${podName}:/u01/wdt/models/model.yaml ${targetModelYaml}
+if [ $? != 0 ]; then
+ echo >&2 "Fail to copy ${podName}:/u01/wdt/models/model.yaml."
+ exit 1
+fi
+
+echo "Copy model.properties from /u01/wdt/models"
+targetModelProperties=/tmp/model.properties
+rm -f ${targetModelProperties}
+kubectl cp -n ${wlsDomainNS} -c ${wlsContainerName} ${podName}:/u01/wdt/models/model.properties ${targetModelProperties}
+if [ $? != 0 ]; then
+ echo >&2 "Fail to copy ${podName}:/u01/wdt/models/model.properties."
+ exit 1
+fi
+
+echo "Query WebLogic version and patch numbers"
+targetFile4Versions=/tmp/version.info
+kubectl exec ${podName} -n ${wlsDomainNS} -c ${wlsContainerName} \
+ -- bash -c 'source $ORACLE_HOME/wlserver/server/bin/setWLSEnv.sh > /dev/null 2>&1 && java weblogic.version -verbose >"'${targetFile4Versions}'"'
+if [ $? != 0 ]; then
+ echo >&2 "Fail to run java weblogic.version."
+ exit 1
+fi
+rm -f ${targetFile4Versions}
+kubectl cp -n ${wlsDomainNS} -c ${wlsContainerName} ${podName}:${targetFile4Versions} ${targetFile4Versions}
+if [ $? != 0 ]; then
+ echo >&2 "Fail to copy ${podName}:${targetFile4Versions}."
+ exit 1
+fi
+
+base64ofDomainYaml=$(cat ${domainConfigurationYaml} | base64)
+base64ofModelYaml=$(cat ${targetModelYaml} | base64)
+base64ofModelProperties=$(cat ${targetModelProperties} | base64)
+base64ofWLSVersionDetails=$(cat ${targetFile4Versions} | base64)
+
+result=$(jq -n -c \
+ --arg domainDeploymentYaml "$base64ofDomainYaml" \
+ --arg wlsImageModelYaml "$base64ofModelYaml" \
+ --arg wlsImageProperties "$base64ofModelProperties" \
+ --arg wlsVersionDetails "${base64ofWLSVersionDetails}" \
+ '{domainDeploymentYaml: $domainDeploymentYaml, wlsImageModelYaml: $wlsImageModelYaml, wlsImageProperties: $wlsImageProperties, wlsVersionDetails: $wlsVersionDetails}')
+echo "result is: $result"
+echo $result >$AZ_SCRIPTS_OUTPUT_PATH
diff --git a/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/queryPrivateIPForAppGateway.sh b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/queryPrivateIPForAppGateway.sh
new file mode 100644
index 000000000..1db3a69c3
--- /dev/null
+++ b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/queryPrivateIPForAppGateway.sh
@@ -0,0 +1,44 @@
+# Copyright (c) 2022, Oracle Corporation and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+# This script runs on Azure Container Instance with Alpine Linux that Azure Deployment script creates.
+#
+# env inputs:
+# SUBNET_ID
+# KNOWN_IP
+
+function query_ip() {
+ echo_stdout "Subnet Id: ${SUBNET_ID}"
+
+ # select a available private IP
+ # azure reserves the first 3 private IPs.
+ local ret=$(az network vnet check-ip-address \
+ --ids ${SUBNET_ID} \
+ --ip-address ${KNOWN_IP})
+ local available=$(echo ${ret} | jq -r .available)
+ if [[ "${available,,}" == "true" ]]; then
+ outputPrivateIP=${KNOWN_IP}
+ else
+ local privateIPAddress=$(echo ${ret} | jq -r .availableIpAddresses[0])
+ if [[ -z "${privateIPAddress}" ]] || [[ "${privateIPAddress}" == "null" ]]; then
+ echo_stderr "ERROR: make sure there is available IP for application gateway in your subnet."
+ fi
+
+ outputPrivateIP=${privateIPAddress}
+ fi
+}
+
+function output_result() {
+ echo "Available Private IP: ${outputPrivateIP}"
+ result=$(jq -n -c \
+ --arg privateIP "$outputPrivateIP" \
+ '{privateIP: $privateIP}')
+ echo "result is: $result"
+ echo $result >$AZ_SCRIPTS_OUTPUT_PATH
+}
+
+# main script
+outputPrivateIP="10.0.0.1"
+
+query_ip
+
+output_result
\ No newline at end of file
diff --git a/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/validateAgic.sh b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/validateAgic.sh
new file mode 100644
index 000000000..a7756cf14
--- /dev/null
+++ b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/validateAgic.sh
@@ -0,0 +1,36 @@
+# Copyright (c) 2024, Oracle Corporation and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+function wait_for_azure_ingress_ready() {
+ local ready=false
+ local attempt=0
+
+ while [[ "${ready}" == "false" && $attempt -le ${checkAgicMaxAttempt} ]]; do
+    echo_stdout "Check if AGIC is ready, attempt: ${attempt}."
+ ready=true
+
+ local ret=$(kubectl get pod -n kube-system | grep "ingress-appgw-deployment-*" | grep "Running")
+ if [ -z "${ret}" ]; then
+ ready=false
+ fi
+
+ attempt=$((attempt + 1))
+ sleep ${checkAgicInterval}
+ done
+
+ if [ ${attempt} -gt ${checkAgicMaxAttempt} ]; then
+    echo_stderr "Failed to enable Application Gateway Ingress Controller."
+ exit 1
+ fi
+
+  echo "Application Gateway Ingress Controller is running."
+}
+
+# Main script
+set -Eo pipefail
+
+install_kubectl
+
+connect_aks $AKS_CLUSTER_NAME $AKS_CLUSTER_RG_NAME
+
+wait_for_azure_ingress_ready
diff --git a/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/validateParameters.sh b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/validateParameters.sh
new file mode 100644
index 000000000..e4460b126
--- /dev/null
+++ b/weblogic-azure-aks/src/main/arm/scripts/inline-scripts/validateParameters.sh
@@ -0,0 +1,588 @@
+# Copyright (c) 2021, 2024, Oracle Corporation and/or its affiliates.
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
+# This script runs on Azure Container Instance with Alpine Linux that Azure Deployment script creates.
+#
+# env inputs:
+# ORACLE_ACCOUNT_NAME
+# ORACLE_ACCOUNT_SHIBBOLETH
+# ACR_NAME
+# AKS_CLUSTER_NAME
+# AKS_CLUSTER_RESOURCEGROUP_NAME
+# BASE64_FOR_SERVICE_PRINCIPAL
+# WLS_SSL_IDENTITY_DATA
+# WLS_SSL_IDENTITY_SHIBBOLETH
+# WLS_SSL_IDENTITY_TYPE
+# WLS_SSL_TRUST_DATA
+# WLS_SSL_TRUST_SHIBBOLETH
+# WLS_SSL_TRUST_TYPE
+# WLS_SSL_PRIVATE_KEY_ALIAS
+# WLS_SSL_PRIVATE_KEY_SHIBBOLETH
+# APPLICATION_GATEWAY_SSL_FRONTEND_CERT_DATA
+# APPLICATION_GATEWAY_SSL_FRONTEND_CERT_SHIBBOLETH
+# DNS_ZONE_NAME
+# DNS_ZONE_RESOURCEGROUP_NAME
+# AKS_VERSION
+# USE_AKS_WELL_TESTED_VERSION
+# VNET_FOR_APPLICATIONGATEWAY
+
+# Validate terminal status with $?, exit with exception if errors happen.
+# $1 - error message
+# $2 - root cause message
+function validate_status() {
+ if [ $? != 0 ]; then
+ echo_stderr "Errors happen during: $1." $2
+ exit 1
+ else
+ echo_stdout "$1"
+ fi
+}
+
+# Validate compute resources
+# Check points:
+# - there is enough resource for AKS cluster
+# - there is enough resource for VM to build the image
+# Example to list the vm usage:
+# az vm list-usage --location "East US" -o table
+# Name CurrentValue Limit
+# ---------------------------------------- -------------- -------
+# Availability Sets 0 2500
+# Total Regional vCPUs 2 200
+# Virtual Machines 1 25000
+# Virtual Machine Scale Sets 0 2500
+# Dedicated vCPUs 0 3000
+# Cloud Services 0 2500
+# Total Regional Low-priority vCPUs 0 100
+# Standard DSv2 Family vCPUs 0 100
+# Standard Av2 Family vCPUs 2 100
+# Basic A Family vCPUs 0 100
+# Standard A0-A7 Family vCPUs 0 200
+# Standard A8-A11 Family vCPUs 0 100
+# Standard D Family vCPUs 0 100
+# Standard Dv2 Family vCPUs 0 100
+# Standard DS Family vCPUs 0 100
+# Standard G Family vCPUs 0 100
+# Standard GS Family vCPUs 0 100
+# Standard F Family vCPUs 0 100
+# Standard FS Family vCPUs 0 100
+# ... ...
+function validate_compute_resources() {
+ # Resource for ubuntu machine
+ # 2 Standard Av2 Family vCPUs
+
+ # query total cores
+ local vmUsage=$(az vm list-usage -l ${location} -o json)
+ local totalCPUs=$(echo ${vmUsage} | jq '.[] | select(.name.value=="cores") | .limit' | tr -d "\"")
+ local currentCPUs=$(echo ${vmUsage} | jq '.[] | select(.name.value=="cores") | .currentValue' | tr -d "\"")
+ local aksCPUs=0
+
+ # if creating new AKS cluster
+ if [[ "${createAKSCluster,,}" == "true" ]]; then
+ local aksVMDetails=$(az vm list-skus --size ${aksAgentPoolVMSize} -l ${location} --query [0])
+ local vmFamily=$(echo ${aksVMDetails} | jq '.family' | tr -d "\"")
+ local vmCPUs=$(echo ${aksVMDetails} | jq '.capabilities[] | select(.name=="vCPUs") | .value' | tr -d "\"")
+ aksCPUs=$((vmCPUs * aksAgentPoolNodeCount))
+
+ # query CPU usage of the vm family
+ local familyLimit=$(echo ${vmUsage} | jq '.[] | select(.name.value=="'${vmFamily}'") | .limit' | tr -d "\"")
+ local familyUsage=$(echo ${vmUsage} | jq '.[] | select(.name.value=="'${vmFamily}'") | .currentValue' | tr -d "\"")
+ local requiredFamilyCPUs=$((aksCPUs + familyUsage))
+    # make sure there is enough vCPUs of the family for AKS
+ if [ ${requiredFamilyCPUs} -gt ${familyLimit} ]; then
+ echo_stderr "It requires ${aksCPUs} ${vmFamily} vCPUs to create the AKS cluster, ${vmFamily} vCPUs quota is limited to ${familyLimit}, current usage is ${familyUsage}."
+ exit 1
+ fi
+ fi
+
+ local vmFamilyOfUbuntu="standardAv2Family"
+ local familyLimit=$(echo ${vmUsage} | jq '.[] | select(.name.value=="'${vmFamilyOfUbuntu}'") | .limit' | tr -d "\"")
+ local familyUsage=$(echo ${vmUsage} | jq '.[] | select(.name.value=="'${vmFamilyOfUbuntu}'") | .currentValue' | tr -d "\"")
+ local requiredFamilyCPUs=$((2 + familyUsage))
+  # make sure there is enough vCPUs of the family for ubuntu machine
+ if [ ${requiredFamilyCPUs} -gt ${familyLimit} ]; then
+ echo_stderr "It requires 2 ${vmFamilyOfUbuntu} vCPUs to create an ubuntu machine for docker image, ${vmFamilyOfUbuntu} vCPUs quota is limited to ${familyLimit}, current usage is ${familyUsage}."
+ exit 1
+ fi
+
+ local requiredCPU=$((aksCPUs + 2 + currentCPUs))
+ if [ ${requiredCPU} -gt ${totalCPUs} ]; then
+ echo_stderr "It requires ${requiredCPU} vCPUs to run WLS on AKS, vCPUs quota is limited to ${totalCPUs}, current usage is ${currentCPUs}."
+ exit 1
+ fi
+
+ echo_stdout "Check compute resources: passed!"
+}
+
+# Ensure the cluster has enough memory resources.
+# The offer deploys a WLS cluster with 1 + ${APP_REPLICAS} pods, each pod requests 1.5GB and 0.25CPU.
+# Minimum memory requirement: 12 + (APP_REPLICAS + 1)*1.5 GB
+function validate_memory_resources() {
+ if [[ "${createAKSCluster,,}" == "true" ]]; then
+ local requiredMemoryinGB=$(echo "12+($APP_REPLICAS+1)*1.5" | bc)
+
+ local vmDetails=$(az vm list-skus --size ${aksAgentPoolVMSize} -l ${location} --query [0])
+ validate_status "Query VM details of ${aksAgentPoolVMSize} in ${location}."
+
+ local memoryGB=$(echo ${vmDetails} | jq '.capabilities[] | select(.name=="MemoryGB") | .value' | tr -d "\"")
+ local requestedMemory=$(echo "$aksAgentPoolNodeCount*$memoryGB" | bc)
+ echo_stdout "Current requested memory is ${requestedMemory}GB."
+ if [[ $(echo "${requestedMemory}<${requiredMemoryinGB}" | bc) -eq 1 ]]; then
+ echo_stderr "It requires ${requiredMemoryinGB} GiB memory to create the AKS cluster, you have to select a larger VM size or increase node count."
+ exit 1
+ fi
+
+ fi
+
+ echo_stdout "Check memory resources: passed!"
+}
+
+function validate_ocr_account() {
+ # ORACLE_ACCOUNT_NAME
+ # ORACLE_ACCOUNT_SHIBBOLETH
+ docker logout
+ echo "${ORACLE_ACCOUNT_SHIBBOLETH}" | docker login ${ocrLoginServer} -u ${ORACLE_ACCOUNT_NAME} --password-stdin
+ validate_status "login OCR with user ${ORACLE_ACCOUNT_NAME}"
+
+ echo_stdout "Check OCR account: passed!"
+}
+
+function check_acr() {
+ local ready=false
+ local attempt=0
+ while [[ "${ready}" == "false" && $attempt -le ${checkAcrMaxAttempt} ]]; do
+ echo_stdout "Check if ACR ${ACR_NAME} is ready, attempt: ${attempt}."
+ ready=true
+
+ local ret=$(az acr show --name ${ACR_NAME} --resource-group ${ACR_RESOURCE_GROUP})
+ if [ -z "${ret}" ]; then
+ ready=false
+ fi
+
+ attempt=$((attempt + 1))
+ sleep ${checkAcrInterval}
+ done
+
+ if [ ${attempt} -gt ${checkAcrMaxAttempt} ]; then
+ echo_stderr "ACR ${ACR_NAME} is not ready."
+ exit 1
+ fi
+
+ echo_stdout "Check if ACR ${ACR_NAME} is ready to import image."
+}
+
+function obtain_image_architecture() {
+ local acrName=$1
+ local repoName=$2
+ local tag=$3
+ local imageUri="${acrName}.azurecr.io/${repoName}:${tag}"
+
+ local imageArch=$(az acr manifest list-metadata -r ${acrName} -n ${repoName} \
+ | jq '.[] | select(.tags != null) | select(.tags[] | length >0 ) | select(.tags[0]=="'${tag}'") | .architecture' \
+ | tr -d "\"")
+
+ if [[ "${imageArch}" == "null" ]]; then
+ # if the image is multi-architecture, the value is empty.
+ # Use the docker manifest inspect command to get the architecture.
+ # https://learn.microsoft.com/en-us/azure/container-registry/push-multi-architecture-images
+ local acrUserName=$(az acr credential show -n ${acrName} --query "username" | tr -d "\"")
+ local acrShibboleth=$(az acr credential show -n ${acrName} --query "passwords[0].value" | tr -d "\"")
+ local acrServer="${acrName}.azurecr.io"
+
+ docker login ${acrServer} -u ${acrUserName} -p ${acrShibboleth}
+ local ret=$(docker manifest inspect ${imageUri} | jq '.manifests[] | .platform.architecture')
+
+ if [[ $ret == *"${constX86Platform}"* && $ret == *"${constARM64Platform}"* ]]; then
+ imageArch="${constMultiArchPlatform}"
+ elif [[ $ret == *"${constX86Platform}"* ]]; then
+ imageArch="${constX86Platform}"
+ elif [[ $ret == *"${constARM64Platform}"* ]]; then
+ imageArch="${constARM64Platform}"
+ else
+ echo_stderr "The architecture of image is not supported. Currently only ARM64 and AMD64 are supported."
+ exit 1
+ fi
+ fi
+ echo_stdout "Architecture of image is ${imageArch}."
+
+ export IMAGE_ARCHITECTURE=${imageArch}
+}
+
+function validate_ocr_image() {
+ local ocrImageFullPath="${ocrLoginServer}/${ocrGaImagePath}:${wlsImageTag}"
+
+ if [[ "${ORACLE_ACCOUNT_ENTITLED,,}" == "true" ]]; then
+
+ # download the ga cpu image mapping file.
+ local cpuImagesListFile=weblogic_cpu_images.json
+ curl -L "${gitUrl4CpuImages}" --retry ${retryMaxAttempt} -o ${cpuImagesListFile}
+ local cpuTag=$(cat ${cpuImagesListFile} | jq ".items[] | select(.gaTag == \"${wlsImageTag}\") | .cpuTag" | tr -d "\"")
+ echo_stdout "cpu tag: ${cpuTag}"
+ # if we can not find a matched image, keep the input tag.
+ if [[ "${cpuTag}" == "" || "${cpuTag,,}" == "null" ]]; then
+ cpuTag=${wlsImageTag}
+ fi
+
+ ocrImageFullPath="${ocrLoginServer}/${ocrCpuImagePath}:${cpuTag}"
+ fi
+
+ echo_stdout "image path: ${ocrImageFullPath}"
+
+ # to mitigate error in https://learn.microsoft.com/en-us/answers/questions/1188413/the-resource-with-name-name-and-type-microsoft-con
+ az provider register -n Microsoft.ContainerRegistry
+
+ check_acr
+
+ # validate the image by importing it to ACR.
+ # if failure happens, the image should be unavailable
+ local tmpRepo="tmp$(date +%s)"
+ local tmpImagePath="${tmpRepo}:${wlsImageTag}"
+ az acr import --name ${ACR_NAME} \
+ --resource-group ${ACR_RESOURCE_GROUP} \
+ --source ${ocrImageFullPath} \
+ -u ${ORACLE_ACCOUNT_NAME} \
+ -p ${ORACLE_ACCOUNT_SHIBBOLETH} \
+ --image ${tmpImagePath} \
+ --only-show-errors
+
+ # $? equals 0 even though failure happens.
+ # check if the image is imported successfully.
+ local ret=$(az acr repository show --name $ACR_NAME --image ${tmpImagePath})
+ if [ -n "${ret}" ]; then
+ obtain_image_architecture ${ACR_NAME} ${tmpRepo} ${wlsImageTag}
+ # delete the image from ACR.
+ az acr repository delete --name ${ACR_NAME} --image ${tmpImagePath} --yes
+ else
+ echo_stderr $ret
+ echo_stderr ""
+ echo_stderr "Image ${ocrImageFullPath} is not available! Please make sure you have accepted the Oracle Standard Terms and Restrictions and the image exists in https://container-registry.oracle.com/ "
+ if [[ "${ORACLE_ACCOUNT_ENTITLED,,}" == "true" ]]; then
+ echo_stderr "Make sure you are entitled to access middleware/weblogic_cpu repository."
+ fi
+
+ exit 1
+ fi
+
+ echo_stdout "Check OCR image ${ocrImageFullPath}: passed!"
+}
+
+function check_acr_admin_enabled() {
+ local acrName=$1
+ local acrRgName=$2
+ echo_stdout "check if admin user enabled in ACR $acrName "
+ local adminUserEnabled=$(az acr show --name $acrName --resource-group ${acrRgName} --query "adminUserEnabled")
+ validate_status "query 'adminUserEnabled' property of ACR ${acrName}" "Invalid ACR: ${acrName}"
+
+ if [[ "${adminUserEnabled}" == "false" ]]; then
+ echo_stderr "Make sure admin user is enabled in ACR $acrName. Please find steps in https://docs.microsoft.com/en-us/azure/container-registry/container-registry-authentication?WT.mc_id=Portal-Microsoft_Azure_CreateUIDef&tabs=azure-cli#admin-account"
+ exit 1
+ fi
+}
+
+function validate_acr_image() {
+ echo_stdout "user provided ACR: $ACR_NAME_FOR_USER_PROVIDED_IMAGE"
+
+ local pathWithoutTag=${userProvidedImagePath%\:*}
+ local repository=${pathWithoutTag#*\/}
+ local tag="${userProvidedImagePath##*:}"
+
+ local tagIndex=$(az acr repository show-tags --name $ACR_NAME_FOR_USER_PROVIDED_IMAGE --repository ${repository} | jq 'index("'${tag}'")')
+ validate_status "check if tag ${tag} exists." "Invalid image path ${userProvidedImagePath}"
+ if [[ "${tagIndex}" == "null" ]]; then
+ echo_stderr "Image ${tag} does not exist in ${repository}."
+ exit 1
+ fi
+
+ obtain_image_architecture ${ACR_NAME_FOR_USER_PROVIDED_IMAGE} ${repository} ${tag}
+
+ echo_stdout "Check ACR image: passed!"
+}
+
+function validate_base_image_path() {
+ if [[ "${useOracleImage,,}" == "true" ]]; then
+ validate_ocr_account
+ validate_ocr_image
+ else
+ validate_acr_image
+ fi
+}
+
+function validate_acr_admin_enabled()
+{
+ if [[ "${useOracleImage,,}" == "true" ]]; then
+ check_acr_admin_enabled "${ACR_NAME}" "${ACR_RESOURCE_GROUP}"
+ else
+ check_acr_admin_enabled "${ACR_NAME_FOR_USER_PROVIDED_IMAGE}" "${ACR_RG_NAME_FOR_USER_PROVIDED_IMAGE}"
+ fi
+}
+
+# Validate whether image architecture is matched with the architecture of the VM.
+# Azure supports both AMD based processor and ARM based CPU, see https://learn.microsoft.com/en-us/azure/virtual-machines/vm-naming-conventions.
+ # For ARM cpu, the VM size name includes letter 'p'.
+ # For AMD cpu, the VM size name does not include letter 'p'.
+# Validate cases:
+ # 1. If the VM size is AMD based, the image should be amd64 or multi-platform.
+ # 2. If the VM size is ARM based, the image should be arm64 or multi-platform.
+# IMAGE_ARCHITECTURE value may be "amd64", "arm64" or "Multi-architecture (amd64 and arm64)".
+function validate_image_compatibility
+{
+ if [[ $aksAgentPoolVMSize == *"p"* ]]; then
+ if [[ "${IMAGE_ARCHITECTURE}" != "${constARM64Platform}" && "${IMAGE_ARCHITECTURE}" != "${constMultiArchPlatform}" ]]; then
+ echo_stderr "The image architecture ${IMAGE_ARCHITECTURE} is not compatible with the ARM based VM size ${aksAgentPoolVMSize}."
+ exit 1
+ fi
+ else
+ if [[ "${IMAGE_ARCHITECTURE}" != "${constX86Platform}" && "${IMAGE_ARCHITECTURE}" != "${constMultiArchPlatform}" ]]; then
+ echo_stderr "The image architecture ${IMAGE_ARCHITECTURE} is not compatible with the AMD based VM size ${aksAgentPoolVMSize}."
+ exit 1
+ fi
+ fi
+}
+
+function validate_wls_ssl_certificates() {
+ local wlsIdentityKeyStoreFileName=${AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY}/identity.keystore
+ local wlsTrustKeyStoreFileName=${AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY}/trust.keystore
+ echo "$WLS_SSL_IDENTITY_DATA" | base64 -d >$wlsIdentityKeyStoreFileName
+ echo "$WLS_SSL_TRUST_DATA" | base64 -d >$wlsTrustKeyStoreFileName
+
+ # use default Java, if no, install open jdk 11.
+ # why not using Microsoft open jdk?
+ # No apk installation package!
+ export JAVA_HOME=/usr/lib/jvm/default-jvm/
+ if [ ! -d "${JAVA_HOME}" ]; then
+ install_jdk
+ JAVA_HOME=/usr/lib/jvm/java-11-openjdk
+ fi
+ #validate if identity keystore has entry
+ ${JAVA_HOME}/bin/keytool -list -v \
+ -keystore $wlsIdentityKeyStoreFileName \
+ -storepass $WLS_SSL_IDENTITY_SHIBBOLETH \
+ -storetype $WLS_SSL_IDENTITY_TYPE |
+ grep 'Entry type:' |
+ grep 'PrivateKeyEntry'
+
+ validate_status "validate Identity Keystore."
+
+ #validate if trust keystore has entry
+ ${JAVA_HOME}/bin/keytool -list -v \
+ -keystore ${wlsTrustKeyStoreFileName} \
+ -storepass $WLS_SSL_TRUST_SHIBBOLETH \
+ -storetype $WLS_SSL_TRUST_TYPE |
+ grep 'Entry type:' |
+ grep 'trustedCertEntry'
+
+ validate_status "validate Trust Keystore."
+
+ echo_stdout "validate SSL key stores: passed!"
+}
+
+function validate_gateway_frontend_certificates() {
+ if [[ "${appGatewayCertificateOption}" == "generateCert" ]]; then
+ return
+ fi
+
+ local appgwFrontCertFileName=${AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY}/gatewaycert.pfx
+ echo "$APPLICATION_GATEWAY_SSL_FRONTEND_CERT_DATA" | base64 -d >$appgwFrontCertFileName
+
+ openssl pkcs12 \
+ -in $appgwFrontCertFileName \
+ -nocerts \
+ -out ${AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY}/cert.key \
+ -passin pass:${APPLICATION_GATEWAY_SSL_FRONTEND_CERT_SHIBBOLETH} \
+ -passout pass:${APPLICATION_GATEWAY_SSL_FRONTEND_CERT_SHIBBOLETH}
+
+ validate_status "access application gateway frontend key." "Make sure the Application Gateway frontend certificate is correct."
+}
+
+function validate_dns_zone() {
+ if [[ "${checkDNSZone,,}" == "true" ]]; then
+ az network dns zone show -n ${DNS_ZONE_NAME} -g ${DNS_ZONE_RESOURCEGROUP_NAME}
+ validate_status "check DNS Zone ${DNS_ZONE_NAME}" "Make sure the DNS Zone exists."
+
+ echo_stdout "Check DNS Zone: passed!"
+ fi
+}
+
+function get_aks_default_version() {
+ constDefaultAKSVersion=$(az aks get-versions --location ${location} \
+ | jq '.orchestrators[] | select(.default==true) | .orchestratorVersion' \
+ | tr -d "\"")
+
+ validate_status "get AKS default version ${constDefaultAKSVersion}"
+}
+
+function validate_aks_version() {
+ if [[ "${USE_AKS_WELL_TESTED_VERSION,,}" == "true" ]]; then
+ local aksWellTestedVersionFile=aks_well_tested_version.json
+ # download the json file that has well-tested version from weblogic-azure repo.
+ curl -L "${gitUrl4AksWellTestedVersionJsonFile}" --retry ${retryMaxAttempt} -o ${aksWellTestedVersionFile}
+ local aksWellTestedVersion=$(cat ${aksWellTestedVersionFile} | jq ".value" | tr -d "\"")
+ echo "AKS well-tested version: ${aksWellTestedVersion}"
+ # check if the well-tested version is supported in the location
+ local ret=$(az aks get-versions --location ${location} \
+ | jq ".orchestrators[] | select(.orchestratorVersion == \"${aksWellTestedVersion}\") | .orchestratorVersion" \
+ | tr -d "\"")
+ if [[ "${aksWellTestedVersion}" != "" ]] && [[ "${ret}" == "${aksWellTestedVersion}" ]]; then
+ outputAksVersion=${aksWellTestedVersion}
+ else
+ # if the well-tested version is invalid, use default version.
+ get_aks_default_version
+ outputAksVersion=${constDefaultAKSVersion}
+ fi
+ else
+ # check if the input version is supported in the location
+ local ret=$(az aks get-versions --location ${location} \
+ | jq ".orchestrators[] | select(.orchestratorVersion == \"${AKS_VERSION}\") | .orchestratorVersion" \
+ | tr -d "\"")
+ if [[ "${ret}" == "${AKS_VERSION}" ]]; then
+ outputAksVersion=${AKS_VERSION}
+ else
+ echo_stderr "ERROR: invalid aks version ${AKS_VERSION} in ${location}."
+ exit 1
+ fi
+ fi
+}
+
+function validate_aks_networking() {
+ local networkPluginMode=$(az aks show -g ${AKS_CLUSTER_RESOURCEGROUP_NAME} -n ${AKS_CLUSTER_NAME} | jq '.networkProfile.networkPluginMode' | tr -d "\"")
+ local networkPlugin=$(az aks show -g ${AKS_CLUSTER_RESOURCEGROUP_NAME} -n ${AKS_CLUSTER_NAME} | jq '.networkProfile.networkPlugin' | tr -d "\"")
+
+ if [[ "${networkPluginMode}" != "null" ]]; then
+ echo_stderr "ERROR: invalid network plugin mode ${networkPluginMode} for ${AKS_CLUSTER_NAME}."
+ exit 1
+ fi
+
+ if [[ "${networkPlugin}" != "azure" ]]; then
+ echo_stderr "ERROR: invalid network plugin ${networkPlugin} for ${AKS_CLUSTER_NAME}."
+ exit 1
+ fi
+}
+
+function enable_aks_managed_identity() {
+ local identityLength=$(az aks show -g ${AKS_CLUSTER_RESOURCEGROUP_NAME} -n ${AKS_CLUSTER_NAME} | jq '.identity | length')
+ echo "identityLength ${identityLength}"
+
+ if [ $identityLength -lt 1 ]; then
+ echo "enable managed identity..."
+ # Your cluster is using service principal, and you are going to update the cluster to use systemassigned managed identity.
+ # After updating, your cluster's control plane and addon pods will switch to use managed identity, but kubelet will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.
+ az aks update -y -g ${AKS_CLUSTER_RESOURCEGROUP_NAME} -n ${AKS_CLUSTER_NAME} --enable-managed-identity
+
+        validate_status "Enable system-assigned managed identity for ${AKS_CLUSTER_NAME}."
+ fi
+}
+
+# VNET input sample:
+# {
+# "name": "wlsaks-vnet",
+# "resourceGroup": "haiche-test",
+# "addressPrefixes": [
+# "10.3.0.0/28"
+# ],
+# "addressPrefix": "10.3.0.0/28",
+# "newOrExisting": "new",
+# "subnets": {
+# "gatewaySubnet": {
+# "name": "wls-aks-gateway-subnet",
+# "addressPrefix": "10.3.0.0/29",
+# "startAddress": "10.3.0.4"
+# }
+# }
+# }
+# To make sure the subnet only have application gateway
+function validate_appgateway_vnet() {
+ echo_stdout "VNET for application gateway: ${VNET_FOR_APPLICATIONGATEWAY}"
+ local vnetName=$(echo ${VNET_FOR_APPLICATIONGATEWAY} | jq '.name' | tr -d "\"")
+ local vnetResourceGroup=$(echo ${VNET_FOR_APPLICATIONGATEWAY} | jq '.resourceGroup' | tr -d "\"")
+ local newOrExisting=$(echo ${VNET_FOR_APPLICATIONGATEWAY} | jq '.newOrExisting' | tr -d "\"")
+ local subnetName=$(echo ${VNET_FOR_APPLICATIONGATEWAY} | jq '.subnets.gatewaySubnet.name' | tr -d "\"")
+
+ if [[ "${newOrExisting,,}" != "new" ]]; then
+ # the subnet can only have Application Gateway.
+ # query ipConfigurations:
+        # if length of ipConfigurations is greater than 0, the subnet fails to meet requirement of Application Gateway.
+ local ret=$(az network vnet show \
+ -g ${vnetResourceGroup} \
+ --name ${vnetName} \
+ | jq ".subnets[] | select(.name==\"${subnetName}\") | .ipConfigurations | length")
+
+ if [ $ret -gt 0 ]; then
+ echo_stderr "ERROR: invalid subnet for Application Gateway, the subnet has ${ret} connected device(s). Make sure the subnet is only for Application Gateway."
+ exit 1
+ fi
+ fi
+}
+
+function query_available_zones() {
+ if [[ "${createAKSCluster,,}" == "true" ]]; then
+ outputAvailableZones=$(az vm list-skus -l ${location} --size ${aksAgentPoolVMSize} --zone true | jq -c '.[] | .locationInfo[] | .zones')
+ fi
+
+ if [ -z "${outputAvailableZones}" ]; then
+ outputAvailableZones="[]"
+ fi
+
+ export outputAvailableZones="${outputAvailableZones}"
+}
+
+function output_result() {
+ echo "AKS version: ${outputAksVersion}"
+ result=$(jq -n -c \
+ --arg aksVersion "$outputAksVersion" \
+ --arg agentAvailabilityZones "${outputAvailableZones}" \
+ '{aksVersion: $aksVersion, agentAvailabilityZones: $agentAvailabilityZones}')
+ echo "result is: $result"
+ echo $result >$AZ_SCRIPTS_OUTPUT_PATH
+}
+
+# main
+location=$1
+createAKSCluster=$2
+aksAgentPoolVMSize=$3
+aksAgentPoolNodeCount=$4
+useOracleImage=$5
+wlsImageTag=$6
+userProvidedImagePath=$7
+enableCustomSSL=$8
+appGatewayCertificateOption=${9}
+enableAppGWIngress=${10}
+checkDNSZone=${11}
+
+outputAksVersion=${constDefaultAKSVersion}
+
+# install docker cli
+install_docker
+
+validate_compute_resources
+
+validate_memory_resources
+
+validate_base_image_path
+
+validate_acr_admin_enabled
+
+validate_image_compatibility
+
+if [[ "${enableCustomSSL,,}" == "true" ]]; then
+ validate_wls_ssl_certificates
+fi
+
+if [[ "${enableAppGWIngress,,}" == "true" ]]; then
+ validate_gateway_frontend_certificates
+fi
+
+validate_dns_zone
+
+if [[ "${createAKSCluster,,}" == "true" ]]; then
+ validate_aks_version
+fi
+
+# validate existing aks cluster
+if [[ "${createAKSCluster,,}" != "true" ]]; then
+ validate_aks_networking
+ enable_aks_managed_identity
+fi
+
+validate_appgateway_vnet
+
+query_available_zones
+
+output_result
diff --git a/weblogic-azure-aks/src/main/arm/scripts/invokeSetupDBConnections.sh b/weblogic-azure-aks/src/main/arm/scripts/invokeSetupDBConnections.sh
deleted file mode 100644
index 903e97ced..000000000
--- a/weblogic-azure-aks/src/main/arm/scripts/invokeSetupDBConnections.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
-# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
-# This script runs on Azure Container Instance with Alpine Linux that Azure Deployment script creates.
-
-#Function to display usage message
-function usage() {
- usage=$(cat <<-END
-Usage:
-./invokeSetupDBConnections.sh
-
-
-
-
-
-
-
-
-
-
-
-END
-)
- echo_stdout "${usage}"
- if [ $1 -eq 1 ]; then
- echo_stderr "${usage}"
- exit 1
- fi
-}
-
-# Main script
-export script="${BASH_SOURCE[0]}"
-export scriptDir="$(cd "$(dirname "${script}")" && pwd)"
-
-source ${scriptDir}/utility.sh
-
-export aksClusterRGName=$1
-export aksClusterName=$2
-export databaseType=$3
-dbPassword=$4
-export dbUser=$5
-export dsConnectionURL=$6
-export jdbcDataSourceName=$7
-export wlsDomainUID=$8
-export wlsUser=$9
-wlsPassword=${10}
-export dbOptType=${11}
-
-echo ${dbPassword} \
- ${wlsPassword} | \
- bash ./setupDBConnections.sh \
- ${aksClusterRGName} \
- ${aksClusterName} \
- ${databaseType} \
- ${dbUser} \
- ${dsConnectionURL} \
- ${jdbcDataSourceName} \
- ${wlsDomainUID} \
- ${wlsUser} \
- ${dbOptType}
-
-if [ $? -ne 0 ]; then
- usage 1
-fi
diff --git a/weblogic-azure-aks/src/main/arm/scripts/invokeSetupNetworking.sh b/weblogic-azure-aks/src/main/arm/scripts/invokeSetupNetworking.sh
deleted file mode 100644
index 32edb91ad..000000000
--- a/weblogic-azure-aks/src/main/arm/scripts/invokeSetupNetworking.sh
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
-# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
-# This script runs on Azure Container Instance with Alpine Linux that Azure Deployment script creates.
-
-#Function to display usage message
-function usage() {
- usage=$(cat <<-END
-Usage:
-./invokeSetupNetworking.sh
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-END
-)
- echo_stdout "${usage}"
- if [ $1 -eq 1 ]; then
- echo_stderr "${usage}"
- exit 1
- fi
-}
-
-# Main script
-export script="${BASH_SOURCE[0]}"
-export scriptDir="$(cd "$(dirname "${script}")" && pwd)"
-
-source ${scriptDir}/utility.sh
-
-export aksClusterRGName=$1
-export aksClusterName=$2
-export wlsDomainName=$3
-export wlsDomainUID=$4
-export lbSvcValues=$5
-export enableAppGWIngress=$6
-export subID=$7
-export curRGName=${8}
-export appgwName=${9}
-export vnetName=${10}
-spBase64String=${11}
-export appgwForAdminServer=${12}
-export enableCustomDNSAlias=${13}
-export dnsRGName=${14}
-export dnsZoneName=${15}
-export dnsAdminLabel=${16}
-export dnsClusterLabel=${17}
-export appgwAlias=${18}
-export enableInternalLB=${19}
-export appgwFrontendSSLCertData=${20}
-appgwFrontendSSLCertPsw=${21}
-export appgwCertificateOption=${22}
-export enableCustomSSL=${23}
-export enableCookieBasedAffinity=${24}
-export enableRemoteConsole=${25}
-export dnszoneAdminT3ChannelLabel=${26}
-export dnszoneClusterT3ChannelLabel=${27}
-
-echo ${spBase64String} \
- ${appgwFrontendSSLCertPsw} | \
- bash ./setupNetworking.sh \
- ${aksClusterRGName} \
- ${aksClusterName} \
- ${wlsDomainName} \
- ${wlsDomainUID} \
- ${lbSvcValues} \
- ${enableAppGWIngress} \
- ${subID} \
- ${curRGName} \
- ${appgwName} \
- ${vnetName} \
- ${appgwForAdminServer} \
- ${enableCustomDNSAlias} \
- ${dnsRGName} \
- ${dnsZoneName} \
- ${dnsAdminLabel} \
- ${dnsClusterLabel} \
- ${appgwAlias} \
- ${enableInternalLB} \
- ${appgwFrontendSSLCertData} \
- ${appgwCertificateOption} \
- ${enableCustomSSL} \
- ${enableCookieBasedAffinity} \
- ${enableRemoteConsole} \
- ${dnszoneAdminT3ChannelLabel} \
- ${dnszoneClusterT3ChannelLabel}
-
-if [ $? -ne 0 ]; then
- usage 1
-fi
diff --git a/weblogic-azure-aks/src/main/arm/scripts/invokeSetupWLSDomain.sh b/weblogic-azure-aks/src/main/arm/scripts/invokeSetupWLSDomain.sh
deleted file mode 100644
index 22d559a25..000000000
--- a/weblogic-azure-aks/src/main/arm/scripts/invokeSetupWLSDomain.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
-# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
-# This script runs on Azure Container Instance with Alpine Linux that Azure Deployment script creates.
-
-#Function to display usage message
-function usage() {
- usage=$(cat <<-END
-Usage:
-./invokeSetupWLSDomain.sh
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-END
-)
- echo_stdout "${usage}"
- if [ $1 -eq 1 ]; then
- echo_stderr "${usage}"
- exit 1
- fi
-}
-
-# Main script
-export script="${BASH_SOURCE[0]}"
-export scriptDir="$(cd "$(dirname "${script}")" && pwd)"
-
-source ${scriptDir}/utility.sh
-
-export ocrSSOUser=$1
-ocrSSOPSW=$2
-export aksClusterRGName=$3
-export aksClusterName=$4
-export wlsImageTag=$5
-export acrName=$6
-export wlsDomainName=$7
-export wlsDomainUID=$8
-export wlsUserName=$9
-wlsPassword=${10}
-wdtRuntimePassword=${11}
-export wlsCPU=${12}
-export wlsMemory=${13}
-export managedServerPrefix=${14}
-export appReplicas=${15}
-export appPackageUrls=${16}
-export currentResourceGroup=${17}
-export scriptURL=${18}
-export storageAccountName=${19}
-export wlsClusterSize=${20}
-export enableCustomSSL=${21}
-export wlsIdentityData=${22}
-wlsIdentityPsw=${23}
-export wlsIdentityType=${24}
-export wlsIdentityAlias=${25}
-wlsIdentityKeyPsw=${26}
-export wlsTrustData=${27}
-wlsTrustPsw=${28}
-export wlsTrustType=${29}
-export enablePV=${30}
-export enableAdminT3Tunneling=${31}
-export enableClusterT3Tunneling=${32}
-export t3AdminPort=${33}
-export t3ClusterPort=${34}
-export wlsJavaOption=${35}
-
-echo ${ocrSSOPSW} \
- ${wlsPassword} \
- ${wdtRuntimePassword} \
- ${wlsIdentityPsw} \
- ${wlsIdentityKeyPsw} \
- ${wlsTrustPsw} | \
- bash ./setupWLSDomain.sh \
- ${ocrSSOUser} \
- ${aksClusterRGName} \
- ${aksClusterName} \
- ${wlsImageTag} \
- ${acrName} \
- ${wlsDomainName} \
- ${wlsDomainUID} \
- ${wlsUserName} \
- ${wlsCPU} \
- ${wlsMemory} \
- ${managedServerPrefix} \
- ${appReplicas} \
- ${appPackageUrls} \
- ${currentResourceGroup} \
- ${scriptURL} \
- ${storageAccountName} \
- ${wlsClusterSize} \
- ${enableCustomSSL} \
- ${wlsIdentityData} \
- ${wlsIdentityType} \
- ${wlsIdentityAlias} \
- ${wlsTrustData} \
- ${wlsTrustType} \
- ${enablePV} \
- ${enableAdminT3Tunneling} \
- ${enableClusterT3Tunneling} \
- ${t3AdminPort} \
- ${t3ClusterPort} \
- ${wlsJavaOption}
-
-if [ $? -ne 0 ]; then
- usage 1
-fi
diff --git a/weblogic-azure-aks/src/main/arm/scripts/invokeUpdateApplications.sh b/weblogic-azure-aks/src/main/arm/scripts/invokeUpdateApplications.sh
index 2d5234750..7d3b7ca0c 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/invokeUpdateApplications.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/invokeUpdateApplications.sh
@@ -20,6 +20,8 @@ Usage:
+
+
END
)
echo_stdout "${usage}"
@@ -48,6 +50,8 @@ export appPackageUrls=${10}
export scriptURL=${11}
export appStorageAccountName=${12}
export appContainerName=${13}
+export userProvidedImagePath=${14}
+export useOracleImage=${15}
echo ${ocrSSOPSW} | \
bash ./updateApplications.sh \
@@ -62,7 +66,9 @@ echo ${ocrSSOPSW} | \
${appPackageUrls} \
${scriptURL} \
${appStorageAccountName} \
- ${appContainerName}
+ ${appContainerName} \
+ ${userProvidedImagePath} \
+ ${useOracleImage}
if [ $? -ne 0 ]; then
usage 1
diff --git a/weblogic-azure-aks/src/main/arm/scripts/pv.yaml.template b/weblogic-azure-aks/src/main/arm/scripts/pv.yaml.template
index 349ced5c6..74ee054b1 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/pv.yaml.template
+++ b/weblogic-azure-aks/src/main/arm/scripts/pv.yaml.template
@@ -21,7 +21,7 @@ spec:
namespace: @NAMESPACE@
azureFile:
secretName: azure-secret
- shareName: weblogic
+ shareName: @FILE_SHARE_NAME@
readOnly: false
mountOptions:
- dir_mode=0777
diff --git a/weblogic-azure-aks/src/main/arm/scripts/queryStorageAccount.sh b/weblogic-azure-aks/src/main/arm/scripts/queryStorageAccount.sh
deleted file mode 100644
index a9241912e..000000000
--- a/weblogic-azure-aks/src/main/arm/scripts/queryStorageAccount.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-export aksClusterRGName=$1
-export aksClusterName=$2
-
-export currentStorageAccount="null"
-
-# Connect to AKS cluster
-function connect_aks_cluster() {
- az aks get-credentials \
- --resource-group ${aksClusterRGName} \
- --name ${aksClusterName} \
- --overwrite-existing
-}
-
-function query_storage_account() {
- echo "install kubectl"
- az aks install-cli
-
- echo "get pv name"
- pvName=$(kubectl get pv -o json |
- jq '.items[] | select(.status.phase=="Bound") | [.metadata.name] | .[0]' |
- tr -d "\"")
-
- if [[ "${pvName}" != "null" ]] && [[ "${pvName}" != "" ]]; then
- # this is a workaround for update domain using marketplace offer.
- # the offer will create a new storage account in a new resource group if there is no storage attached.
- currentStorageAccount=$(kubectl get pv ${pvName} -o json | jq '. | .metadata.labels.storageAccount' | tr -d "\"")
- fi
-}
-
-function output_result() {
- echo ${currentStorageAccount}
-
- result=$(jq -n -c \
- --arg storageAccount $currentStorageAccount \
- '{storageAccount: $storageAccount}')
- echo "result is: $result"
- echo $result >$AZ_SCRIPTS_OUTPUT_PATH
-}
-
-connect_aks_cluster
-
-query_storage_account
-
-output_result
diff --git a/weblogic-azure-aks/src/main/arm/scripts/setupDBConnections.sh b/weblogic-azure-aks/src/main/arm/scripts/setupDBConnections.sh
index fd5283aa8..446976b02 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/setupDBConnections.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/setupDBConnections.sh
@@ -1,30 +1,31 @@
-# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
+# Copyright (c) 2021, 2024 Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
echo "Script ${0} starts"
-# read from stdin
-function read_sensitive_parameters_from_stdin() {
- read dbPassword wlsPassword
-}
-
#Function to display usage message
function usage() {
usage=$(cat <<-END
Usage:
-echo |
- ./setupDBConnections.sh
-
-
-
-
-
-
-
-
-
+You must specify the following environment variables:
+AKS_RESOURCE_GROUP_NAME: the name of resource group that runs the AKS cluster.
+AKS_NAME: the name of the AKS cluster.
+DATABASE_TYPE: one of the supported database types.
+DB_CONFIGURATION_TYPE: createOrUpdate: create a new data source connection, or update an existing data source connection. delete: delete an existing data source connection.
+DB_SHIBBOLETH: password for Database.
+DB_USER: user id of Database.
+DB_CONNECTION_STRING: JDBC Connection String.
+DB_DRIVER_NAME: datasource driver name, must be specified if database type is otherdb.
+ENABLE_SHIBBOLETHLESS_CONNECTION: true to enable passwordless connection
+GLOBAL_TRANSATION_PROTOCOL: Determines the transaction protocol (global transaction processing behavior) for the data source.
+JDBC_DATASOURCE_NAME: JNDI Name for JDBC Datasource.
+TEST_TABLE_NAME: the name of the database table to use when testing physical database connections. This name is required when you specify a Test Frequency and enable Test Reserved Connections.
+WLS_DOMAIN_UID: UID of WebLogic domain, used in WebLogic Operator.
+WLS_DOMAIN_USER: user name for WebLogic Administrator.
+WLS_DOMAIN_SHIBBOLETH: password for WebLogic Administrator.
END
)
+
echo_stdout "${usage}"
if [ $1 -eq 1 ]; then
echo_stderr "${usage}"
@@ -34,53 +35,54 @@ END
#Function to validate input
function validate_input() {
- if [[ -z "$aksClusterRGName" || -z "${aksClusterName}" ]]; then
- echo_stderr "aksClusterRGName and aksClusterName are required. "
+ if [[ -z "$AKS_RESOURCE_GROUP_NAME" || -z "${AKS_NAME}" ]]; then
+ echo_stderr "AKS_RESOURCE_GROUP_NAME and AKS_NAME are required. "
usage 1
fi
- if [ -z "$databaseType" ]; then
- echo_stderr "databaseType is required. "
+ if [ -z "$DATABASE_TYPE" ]; then
+ echo_stderr "DATABASE_TYPE is required. "
usage 1
fi
- if [[ -z "$dbPassword" || -z "${dbUser}" ]]; then
- echo_stderr "dbPassword and dbUser are required. "
+ if [[ -z "${DB_SHIBBOLETH}" || -z "${DB_USER}" ]]; then
+ echo_stderr "DB_SHIBBOLETH and DB_USER are required. "
usage 1
fi
- if [ -z "$dsConnectionURL" ]; then
- echo_stderr "dsConnectionURL is required. "
+ if [ -z "$DB_CONNECTION_STRING" ]; then
+ echo_stderr "DB_CONNECTION_STRING is required. "
usage 1
fi
- if [ -z "$jdbcDataSourceName" ]; then
- echo_stderr "jdbcDataSourceName is required. "
+ if [ -z "$JDBC_DATASOURCE_NAME" ]; then
+ echo_stderr "JDBC_DATASOURCE_NAME is required. "
usage 1
fi
- if [ -z "$wlsDomainUID" ]; then
- echo_stderr "wlsDomainUID is required. "
+ if [ -z "$WLS_DOMAIN_UID" ]; then
+ echo_stderr "WLS_DOMAIN_UID is required. "
usage 1
fi
- if [[ -z "$wlsUser" || -z "${wlsPassword}" ]]; then
- echo_stderr "wlsUser and wlsPassword are required. "
+ if [[ -z "$WLS_DOMAIN_USER" || -z "${WLS_DOMAIN_SHIBBOLETH}" ]]; then
+ echo_stderr "WLS_DOMAIN_USER and WLS_DOMAIN_SHIBBOLETH are required. "
usage 1
fi
-}
-# Connect to AKS cluster
-function connect_aks_cluster() {
- az aks get-credentials \
- --resource-group ${aksClusterRGName} \
- --name ${aksClusterName} \
- --overwrite-existing
+ # reset shibboleth
+ if [[ "${ENABLE_SHIBBOLETHLESS_CONNECTION,,}" == "true" ]]; then
+ DB_SHIBBOLETH=""
+
+ if [[ "${DATABASE_TYPE}" == "${constDBTypeSqlServer}" ]]; then
+ DB_USER=""
+ fi
+ fi
}
function create_datasource_model_configmap_and_secret() {
echo "get data source secret name"
- jndiLabel=${jdbcDataSourceName//\//\_}
+ jndiLabel=${JDBC_DATASOURCE_NAME//\//\_}
secretLen=$(kubectl get secret -n ${wlsDomainNS} -l datasource.JNDI="${jndiLabel}" -o json \
| jq '.items | length')
if [ ${secretLen} -ge 1 ];then
@@ -88,27 +90,19 @@ function create_datasource_model_configmap_and_secret() {
| jq ".items[0].metadata.name" \
| tr -d "\"")
else
- dbSecretName="ds-secret-${databaseType}-${datetime}"
+ dbSecretName="ds-secret-${DATABASE_TYPE}-${datetime}"
fi
echo "Data source secret name: ${dbSecretName}"
chmod ugo+x $scriptDir/dbUtility.sh
- echo "${dbPassword}" | \
- bash $scriptDir/dbUtility.sh \
- ${databaseType} \
- "${dbUser}" \
- "${dsConnectionURL}" \
- "${jdbcDataSourceName}" \
- "${wlsDomainUID}" \
- "${dbSecretName}" \
- "${optTypeUpdate}"
+ bash $scriptDir/dbUtility.sh ${dbSecretName} ${optTypeUpdate}
}
function apply_datasource_to_domain() {
echo "apply datasoure"
# get domain configurations
domainConfigurationJsonFile=$scriptDir/domain.json
- kubectl -n ${wlsDomainNS} get domain ${wlsDomainUID} -o json >${domainConfigurationJsonFile}
+ kubectl -n ${wlsDomainNS} get domain ${WLS_DOMAIN_UID} -o json >${domainConfigurationJsonFile}
restartVersion=$(cat ${domainConfigurationJsonFile} | jq '. | .spec.restartVersion' | tr -d "\"")
secretList=$(cat ${domainConfigurationJsonFile} | jq -r '. | .spec.configuration.secrets')
@@ -136,16 +130,18 @@ function apply_datasource_to_domain() {
# apply the secret
# restart the domain
timestampBeforePatchingDomain=$(date +%s)
- kubectl -n ${wlsDomainNS} patch domain ${wlsDomainUID} \
+ kubectl -n ${wlsDomainNS} patch domain ${WLS_DOMAIN_UID} \
--type=json \
-p '[{"op": "replace", "path": "/spec/restartVersion", "value": "'${restartVersion}'" }, {"op": "replace", "path": "/spec/configuration/model/configMap", "value":'${wlsConfigmapName}'}, {"op": "replace", "path": "/spec/configuration/secrets", "value": '${secretStrings}'}]'
+
+ utility_validate_status "Patch DB configuration."
}
function remove_datasource_from_domain() {
echo "remove datasoure secret from domain configuration"
# get domain configurations
domainConfigurationJsonFile=$scriptDir/domain.json
- kubectl -n ${wlsDomainNS} get domain ${wlsDomainUID} -o json >${domainConfigurationJsonFile}
+ kubectl -n ${wlsDomainNS} get domain ${WLS_DOMAIN_UID} -o json >${domainConfigurationJsonFile}
restartVersion=$(cat ${domainConfigurationJsonFile} | jq '. | .spec.restartVersion' | tr -d "\"")
secretList=$(cat ${domainConfigurationJsonFile} | jq -r '. | .spec.configuration.secrets')
@@ -183,20 +179,25 @@ function remove_datasource_from_domain() {
# apply the secret
# restart the domain
timestampBeforePatchingDomain=$(date +%s)
- kubectl -n ${wlsDomainNS} patch domain ${wlsDomainUID} \
+ kubectl -n ${wlsDomainNS} patch domain ${WLS_DOMAIN_UID} \
--type=json \
-p '[{"op": "replace", "path": "/spec/restartVersion", "value": "'${restartVersion}'" }, {"op": "replace", "path": "/spec/configuration/model/configMap", "value":'${wlsConfigmapName}'}, {"op": "replace", "path": "/spec/configuration/secrets", "value": '${secretStrings}'}]'
+
+ utility_validate_status "Patch DB configuration."
}
function wait_for_operation_completed() {
# Make sure all of the pods are running.
- replicas=$(kubectl -n ${wlsDomainNS} get domain ${wlsDomainUID} -o json \
- | jq '. | .spec.clusters[] | .replicas')
+
+ clusterName=$(kubectl get cluster -n ${wlsDomainNS} -o json | jq -r '.items[0].metadata.name')
+
+ replicas=$(kubectl -n ${wlsDomainNS} get cluster ${clusterName} -o json \
+ | jq '. | .spec.replicas')
utility_wait_for_pod_restarted \
${timestampBeforePatchingDomain} \
${replicas} \
- ${wlsDomainUID} \
+ ${WLS_DOMAIN_UID} \
${checkPodStatusMaxAttemps} \
${checkPodStatusInterval}
@@ -208,19 +209,11 @@ function wait_for_operation_completed() {
}
function delete_datasource() {
- echo "remove secret and model of data source ${jdbcDataSourceName}"
+ echo "remove secret and model of data source ${JDBC_DATASOURCE_NAME}"
# remove secret
# remove model
chmod ugo+x $scriptDir/dbUtility.sh
- echo "${dbPassword}" | \
- bash $scriptDir/dbUtility.sh \
- ${databaseType} \
- "${dbUser}" \
- "${dsConnectionURL}" \
- "${jdbcDataSourceName}" \
- "${wlsDomainUID}" \
- "${dbSecretName}" \
- "${optTypeDelete}"
+ bash $scriptDir/dbUtility.sh ${dbSecretName} ${optTypeDelete}
# update weblogic domain
remove_datasource_from_domain
@@ -233,7 +226,7 @@ function validate_datasource() {
testDatasourceScript=${scriptDir}/${dsScriptFileName}
podNum=$(kubectl -n ${wlsDomainNS} get pod -l weblogic.clusterName=${wlsClusterName} -o json | jq '.items| length')
if [ ${podNum} -le 0 ]; then
- echo "Ensure your cluster has at least one pod."
+ echo_stderr "Ensure your cluster has at least one pod."
exit 1
fi
@@ -245,11 +238,11 @@ function validate_datasource() {
clusterTargetPort=$(kubectl get svc ${wlsClusterSvcName} -n ${wlsDomainNS} -o json | jq '.spec.ports[] | select(.name=="default") | .port')
t3ConnectionString="t3://${wlsClusterSvcName}.${wlsDomainNS}.svc.cluster.local:${clusterTargetPort}"
cat <${testDatasourceScript}
-connect('${wlsUser}', '${wlsPassword}', '${t3ConnectionString}')
+connect('${WLS_DOMAIN_USER}', '${WLS_DOMAIN_SHIBBOLETH}', '${t3ConnectionString}')
serverRuntime()
print 'start to query data source jndi bean'
dsMBeans = cmo.getJDBCServiceRuntime().getJDBCDataSourceRuntimeMBeans()
-ds_name = '${jdbcDataSourceName}'
+ds_name = '${JDBC_DATASOURCE_NAME}'
for ds in dsMBeans:
if (ds_name == ds.getName()):
print 'DS name is: '+ds.getName()
@@ -259,54 +252,46 @@ EOF
echo "copy test script ${testDatasourceScript} to pod path /tmp/${dsScriptFileName}"
targetDSFilePath=/tmp/${dsScriptFileName}
kubectl cp ${testDatasourceScript} -n ${wlsDomainNS} ${podName}:${targetDSFilePath}
- kubectl exec -it ${podName} -n ${wlsDomainNS} -c ${wlsContainerName} -- bash -c "wlst.sh ${targetDSFilePath}" | grep "State is Running"
+ echo "execute script to validate data source ${JDBC_DATASOURCE_NAME} in pod ${podName} with wlsContainerName ${wlsContainerName}"
+ kubectl exec ${podName} -n ${wlsDomainNS} -c ${wlsContainerName} -- bash -c "wlst.sh ${targetDSFilePath}" | grep "State is Running"
if [ $? == 1 ];then
- echo "Failed to configure datasource ${jdbcDataSourceName}. Please make sure the input values are correct."
- delete_datasource
+ echo_stderr "Failed to configure datasource ${JDBC_DATASOURCE_NAME}. Please make sure the input values are correct."
exit 1
+ else
+ echo "Data source ${JDBC_DATASOURCE_NAME} is configured successfully."
fi
}
# Main script
+set -Eo pipefail
+
export script="${BASH_SOURCE[0]}"
export scriptDir="$(cd "$(dirname "${script}")" && pwd)"
source ${scriptDir}/common.sh
source ${scriptDir}/utility.sh
-export aksClusterRGName=$1
-export aksClusterName=$2
-export databaseType=$3
-export dbUser=$4
-export dsConnectionURL=$5
-export jdbcDataSourceName=$6
-export wlsDomainUID=$7
-export wlsUser=$8
-export dbOptType=$9
-
export datetime=$(date +%s)
export optTypeDelete='delete'
export optTypeUpdate='createOrUpdate'
export wlsClusterName="cluster-1"
-export wlsClusterSvcName="${wlsDomainUID}-cluster-${wlsClusterName}"
-export wlsConfigmapName="${wlsDomainUID}-wdt-config-map"
-export wlsDomainNS="${wlsDomainUID}-ns"
-
-read_sensitive_parameters_from_stdin
+export wlsClusterSvcName="${WLS_DOMAIN_UID}-cluster-${wlsClusterName}"
+export wlsConfigmapName="${WLS_DOMAIN_UID}-wdt-config-map"
+export wlsDomainNS="${WLS_DOMAIN_UID}-ns"
validate_input
-connect_aks_cluster
+connect_aks $AKS_NAME $AKS_RESOURCE_GROUP_NAME
install_kubectl
-if [[ "${dbOptType}" == "${optTypeDelete}" ]];then
- echo "delete date source: ${jdbcDataSourceName}"
+if [[ "${DB_CONFIGURATION_TYPE}" == "${optTypeDelete}" ]];then
+ echo "delete data source: ${JDBC_DATASOURCE_NAME}"
delete_datasource
else
- echo "create/update data source: ${jdbcDataSourceName}"
+ echo "create/update data source: ${JDBC_DATASOURCE_NAME}"
create_datasource_model_configmap_and_secret
apply_datasource_to_domain
wait_for_operation_completed
diff --git a/weblogic-azure-aks/src/main/arm/scripts/setupNetworking.sh b/weblogic-azure-aks/src/main/arm/scripts/setupNetworking.sh
index 0fb9500e7..084524d72 100644
--- a/weblogic-azure-aks/src/main/arm/scripts/setupNetworking.sh
+++ b/weblogic-azure-aks/src/main/arm/scripts/setupNetworking.sh
@@ -1,13 +1,8 @@
-# Copyright (c) 2021, Oracle Corporation and/or its affiliates.
+# Copyright (c) 2021, 2024 Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
echo "Script ${0} starts"
-# read from stdin
-function read_sensitive_parameters_from_stdin() {
- read spBase64String appgwFrontendSSLCertPsw
-}
-
# Install latest kubectl and Helm
function install_utilities() {
if [ -d "apps" ]; then
@@ -21,251 +16,133 @@ function install_utilities() {
install_kubectl
}
-#Function to display usage message
-function usage() {
- usage=$(
- cat <<-END
-Usage:
-echo |
- ./setupNetworking.sh
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-