From 920705b4c384a3ae58a41d675340f6e9ba3b06c2 Mon Sep 17 00:00:00 2001
From: Paul Parkinson
Date: Wed, 23 Mar 2022 11:36:57 -0400
Subject: [PATCH 1/8] add list of default metrics in readme

---
 README.md | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/README.md b/README.md
index 17431fa..7dd241f 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,34 @@ This distribution contains scripts and code for exporting metrics and logs from

 v1 (preview) - contains export of key database metrics to Prometheus and suggested Grafana dashboard

+The following metrics are currently exposed by default.
+
+- oracledb_exporter_last_scrape_duration_seconds
+- oracledb_exporter_last_scrape_error
+- oracledb_exporter_scrapes_total
+- oracledb_up
+- oracledb_activity_execute_count
+- oracledb_activity_parse_count_total
+- oracledb_activity_user_commits
+- oracledb_activity_user_rollbacks
+- oracledb_sessions_activity
+- oracledb_wait_time_application
+- oracledb_wait_time_commit
+- oracledb_wait_time_concurrency
+- oracledb_wait_time_configuration
+- oracledb_wait_time_network
+- oracledb_wait_time_other
+- oracledb_wait_time_scheduler
+- oracledb_wait_time_system_io
+- oracledb_wait_time_user_io
+- oracledb_tablespace_bytes
+- oracledb_tablespace_max_bytes
+- oracledb_tablespace_free
+- oracledb_tablespace_used_percent
+- oracledb_process_count
+- oracledb_resource_current_utilization
+- oracledb_resource_limit_value
+
 ## Table of Contents

 - [Unified App Dev Monitoring with Oracle Database](#unified-app-dev-monitoring-with-oracle-database)

From 56903feb6cd2a111fba7b4049a011f2e12e94639 Mon Sep 17 00:00:00 2001
From: Paul Parkinson
Date: Sat, 18 Jun 2022 20:13:35 -0400
Subject: [PATCH 2/8] readme updates

---
 README.md          | 35 +++++++++++++++++++----------------
 examples/README.md |  2 +-
 2 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/README.md b/README.md
index 5330957..02f3118 100644
--- a/README.md
+++ b/README.md
@@ -33,32 +33,35 @@ Docker image can be pushed to $DOCKER_REGISTRY using the following.

 `./push.sh`

-### Running
+### Run

 Ensure the environment variable DATA_SOURCE_NAME (and TNS_ADMIN if appropriate) is set correctly before starting.

-DATA_SOURCE_NAME should be in Oracle EZCONNECT format:
-
-19c Oracle Client supports enhanced EZCONNECT, you are able to failover to standby DB or gather some heavy metrics from active standby DB and specify some additional parameters. Within 19c client you are able to connect 12c primary/standby DB too :)
 For Example:

 ```bash
-# export Oracle location:
-export DATA_SOURCE_NAME=system/password@oracle-sid
-# or using a complete url:
-export DATA_SOURCE_NAME=user/password@//myhost:1521/service
-# 19c client for primary/standby configuration
-export DATA_SOURCE_NAME=user/password@//primaryhost:1521,standbyhost:1521/service
-# 19c client for primary/standby configuration with options
-export DATA_SOURCE_NAME=user/password@//primaryhost:1521,standbyhost:1521/service?connect_timeout=5&transport_connect_timeout=3&retry_count=3
-# 19c client for ASM instance connection (requires SYSDBA)
-export DATA_SOURCE_NAME=user/password@//primaryhost:1521,standbyhost:1521/+ASM?as=sysdba
-# Then run the exporter
-/path/to/binary/oracle-db-monitoring-exporter --log.level error --web.listen-address 0.0.0.0:9161
+export DATA_SOURCE_NAME="%USER%/$(dbpassword)@%PDB_NAME%_tp"
+```
+
+Kubernetes Secrets, etc. can of course be used to store the password.
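+
+For example, the password could be stored in a Kubernetes Secret and injected as an environment variable that DATA_SOURCE_NAME then references (a minimal sketch; the secret name `dbuser` and key `dbpassword` are illustrative, matching the example deployment later in this series):
+
+```bash
+kubectl create secret generic dbuser --from-literal=dbpassword='<your-db-password>'
+```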
+
+OCI Vault support for storing/accessing password values is built into the exporters and is enabled simply by setting the OCI_REGION and VAULT_SECRET_OCID environment variables.
+
+For example:
+
+```bash
+export OCI_REGION="us-ashburn-1"
+export VAULT_SECRET_OCID="ocid..."
 ```

 The only other required environment variable is DEFAULT_METRICS value which is set to the location of the config file.

+For example:
+
+```bash
+export DEFAULT_METRICS="/msdataworkshop/observability/db-metrics-%EXPORTER_NAME%-exporter-metrics.toml"
+```
+
 Run using Java:

 `java -jar target/observability-exporter-0.1.0.jar`

diff --git a/examples/README.md b/examples/README.md
index 49440d7..3e26235 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,7 +1,7 @@
 # Observability Exporter Example

-Please refer to the Unified Observability in Grafana with converged Oracle Database Workshop at http://bit.ly/unifiedobservability and it's corresponding repos https://github.com/oracle/microservices-datadriven/tree/main/grabdish/observability for complete examples.
+Please refer to the Unified Observability in Grafana with converged Oracle Database Workshop at http://bit.ly/unifiedobservability and its corresponding repo https://github.com/oracle/microservices-datadriven/tree/main/grabdish/observability/db-metrics-exporter for complete examples.

 More examples will be provided here in the near future.

From 24d22c6517c3e311e767365764fdc4faceb69a9f Mon Sep 17 00:00:00 2001
From: Paul Parkinson
Date: Thu, 30 Jun 2022 17:40:29 -0400
Subject: [PATCH 3/8] various updates

---
 README.md                                     |  2 +
 examples/README.md                            | 19 -----
 examples/deploy.sh                            | 62 --------------
 ...lity-exporter-example-service-monitor.yaml | 14 ----
 ...bservability-exporter-example-service.yaml | 14 ----
 .../observability/ObservabilityExporter.java  |  2 +
 .../observability/logs/LogsExporter.java      | 34 +++++---
 .../metrics/MetricsExporter.java              | 55 ++++++------
 .../tracing/TracingExporter.java              | 83 +++++++++++--------
 9 files changed, 104 insertions(+), 181 deletions(-)
 delete mode 100755 examples/deploy.sh
 delete mode 100644 examples/observability-exporter-example-service-monitor.yaml
 delete mode 100644 examples/observability-exporter-example-service.yaml

diff --git a/README.md b/README.md
index 02f3118..9957d61 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,8 @@ All three exporters (metrics, log, and trace) can be configured in the same file

 The old version of the metrics exporter can be found in the [old implementation branch][old implementation branch] and the new metrics exporter implementation is backward compatible such that the same configuration for both database connection and metrics definition can be used.

+Users are encouraged to open issues and enhancements requests against this github repos and feel free to ask any questions. We will actively work on them as we will the development of the exporters.
+
 ### Build

 Build without running tests using the following.
diff --git a/examples/README.md b/examples/README.md
index 3e26235..ae4198a 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -4,22 +4,3 @@

 Please refer to the Unified Observability in Grafana with converged Oracle Database Workshop at http://bit.ly/unifiedobservability and its corresponding repo https://github.com/oracle/microservices-datadriven/tree/main/grabdish/observability/db-metrics-exporter for complete examples.

 More examples will be provided here in the near future.
-
-# Metrics exporter
-
-1. Pre-req. 
Run setup for the GrabDish workshop including observability lab steps to install and configure Grafana and Prometheus -2. Run `./deploy.sh` in this directory -3. `curl http://observability-exporter-example:8080/metrics` from within cluster to see Prometheus stats -4. View same stats from within Grafana by loading AQ dashboard - -The same can be done above for TEW by simply replace `aq` with `teq` in the deployment and configmap yamls - -Troubleshooting... - -kubectl port-forward prometheus-stable-kube-prometheus-sta-prometheus-0 -n msdataworkshop 9090:9090 - -# Logs exporter - -# Trace exporter - -# Combined Metrics, Logs, and Trace exporter \ No newline at end of file diff --git a/examples/deploy.sh b/examples/deploy.sh deleted file mode 100755 index 182328b..0000000 --- a/examples/deploy.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash -## Copyright (c) 2021 Oracle and/or its affiliates. -## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ - -SCRIPT_DIR=$(dirname $0) - -if [ -z "$DOCKER_REGISTRY" ]; then - echo "DOCKER_REGISTRY not set. Will get it with state_get" - export DOCKER_REGISTRY=$(state_get DOCKER_REGISTRY) -fi - -if [ -z "$DOCKER_REGISTRY" ]; then - echo "Error: DOCKER_REGISTRY env variable needs to be set!" - exit 1 -fi - -if [ -z "$ORDER_DB_NAME" ]; then - echo "ORDER_DB_NAME not set. Will get it with state_get" - export ORDER_DB_NAME=$(state_get ORDER_DB_NAME) -fi - -if [ -z "$ORDER_DB_NAME" ]; then - echo "Error: ORDER_DB_NAME env variable needs to be set!" - exit 1 -fi - -echo create configmap for db-metrics-banka-exporter... -kubectl delete configmap db-metrics-banka-exporter-config -n msdataworkshop -kubectl create configmap db-metrics-banka-exporter-config --from-file=db-metrics-banka-exporter-metrics.toml -n msdataworkshop -echo -echo create db-metrics-exporter deployment and service... 
-export CURRENTTIME=generated -#export CURRENTTIME=$( date '+%F_%H:%M:%S' ) -echo CURRENTTIME is $CURRENTTIME ...this will be appended to generated deployment yaml - -cp db-metrics-exporter-deployment.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml - -#sed -e "s|%DOCKER_REGISTRY%|${DOCKER_REGISTRY}|g" db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -sed -e "s|%EXPORTER_NAME%|example|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -sed -e "s|%PDB_NAME%|${ORDER_DB_NAME}|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -sed -e "s|%USER%|aquser|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -sed -e "s|%db-wallet-secret%|order-db-tns-admin-secret|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#sed -e "s|${OCI_REGION-}|${OCI_REGION}|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#sed -e "s|${VAULT_SECRET_OCID-}|${VAULT_SECRET_OCID}|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml - - -kubectl delete configmap observability-exporter-example-config -n msdataworkshop - -kubectl create configmap observability-exporter-example-config --from-file=aq-metrics.toml -n msdataworkshop - -kubectl apply -f observability-exporter-example-deployment-test.yaml -n msdataworkshop - -kubectl apply -f observability-exporter-example-service.yaml -n msdataworkshop - -kubectl apply -f observability-exporter-example-service-monitor.yaml -n msdataworkshop diff --git a/examples/observability-exporter-example-service-monitor.yaml b/examples/observability-exporter-example-service-monitor.yaml deleted file mode 100644 index 4fda463..0000000 --- a/examples/observability-exporter-example-service-monitor.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: prometheus-observability-exporter-example - labels: - app: observability-exporter-example - release: stable -spec: - endpoints: - - interval: 5s - port: metrics - selector: - matchLabels: - app: observability-exporter-example \ No newline at end of file diff --git a/examples/observability-exporter-example-service.yaml b/examples/observability-exporter-example-service.yaml deleted file mode 100644 index c4479d8..0000000 --- 
a/examples/observability-exporter-example-service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: observability-exporter-example - labels: - app: observability-exporter-example -spec: - type: NodePort - ports: - - port: 8080 - name: metrics - targetPort: 8080 - selector: - app: observability-exporter-example diff --git a/src/main/java/oracle/observability/ObservabilityExporter.java b/src/main/java/oracle/observability/ObservabilityExporter.java index 741edf8..f0209c2 100644 --- a/src/main/java/oracle/observability/ObservabilityExporter.java +++ b/src/main/java/oracle/observability/ObservabilityExporter.java @@ -26,6 +26,8 @@ public class ObservabilityExporter { public String VAULT_SECRET_OCID = System.getenv("VAULT_SECRET_OCID"); //eg ocid.... public String OCI_CONFIG_FILE = System.getenv("OCI_CONFIG_FILE"); //eg "~/.oci/config" public String OCI_PROFILE = System.getenv("OCI_PROFILE"); //eg "DEFAULT" + public static final String CONTEXT = "context"; + public static final String REQUEST = "request"; PoolDataSource observabilityDB; public PoolDataSource getPoolDataSource() throws SQLException { diff --git a/src/main/java/oracle/observability/logs/LogsExporter.java b/src/main/java/oracle/observability/logs/LogsExporter.java index 7eb2012..2b8890c 100644 --- a/src/main/java/oracle/observability/logs/LogsExporter.java +++ b/src/main/java/oracle/observability/logs/LogsExporter.java @@ -18,12 +18,16 @@ @RestController public class LogsExporter extends ObservabilityExporter implements Runnable { - private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(LogsExporter.class); + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(LogsExporter.class); + public static final String TIMESTAMPFIELD = "timestampfield"; + public static final String LOG = "log"; public String LOG_INTERVAL = System.getenv("LOG_INTERVAL"); // "30s" private int logInterval = 30; List lastLogged = new ArrayList<>(); private java.sql.Timestamp alertLogQueryLastLocalDateTime; + private int consecutiveExceptionCount = 0; //used to backoff todo should be a finer/log entry level rather than global + @PostConstruct public void init() throws Exception { @@ -34,15 +38,17 @@ public void init() throws Exception { public void run() { while (true) { try { - LOG.debug("LogExporter default metrics from:" + DEFAULT_METRICS); + Thread.sleep(consecutiveExceptionCount * 1000); + Thread.sleep(logInterval * 1000); + LOGGER.debug("LogsExporter default metrics from:" + DEFAULT_METRICS); if(LOG_INTERVAL!=null && !LOG_INTERVAL.trim().equals("")) logInterval = Integer.getInteger(LOG_INTERVAL); - LOG.debug("LogExporter logInterval:" + logInterval); + LOGGER.debug("LogsExporter logInterval:" + logInterval); File tomlfile = new File(DEFAULT_METRICS); TomlMapper mapper = new TomlMapper(); JsonNode jsonNode = mapper.readerFor(LogsExporterConfigEntry.class).readTree(new FileInputStream(tomlfile)); - JsonNode log = jsonNode.get("log"); + JsonNode log = jsonNode.get(LOG); if(log == null || log.isEmpty()) { - LOG.info("No logs records configured"); + LOGGER.info("No logs records configured"); return; } Iterator logs = log.iterator(); @@ -50,10 +56,15 @@ public void run() { try (Connection connection = getPoolDataSource().getConnection()) { while (logs.hasNext()) { //for each "log" entry in toml/config... JsonNode next = logs.next(); - String request = next.get("request").asText(); // the sql query - LOG.debug("DBLogsExporter. 
request:" + request); - String timestampfield = next.get("timestampfield").asText(); // eg ORIGINATING_TIMESTAMP - LOG.debug("DBLogsExporter. timestampfield:" + timestampfield); + String request = next.get(REQUEST).asText(); // the sql query + LOGGER.debug("LogsExporter request:" + request); + JsonNode timestampfieldNode = next.get(TIMESTAMPFIELD); + if (timestampfieldNode==null) { + LOGGER.warn("LogsExporter entry does not contain `timestampfield' value request:" + request); + continue; + } + String timestampfield = timestampfieldNode.asText(); // eg ORIGINATING_TIMESTAMP + LOGGER.debug("LogsExporter timestampfield:" + timestampfield); PreparedStatement statement = connection.prepareStatement( alertLogQueryLastLocalDateTime == null ? request : request + " WHERE " + timestampfield + " > ?"); if(alertLogQueryLastLocalDateTime!=null) statement.setTimestamp(1, alertLogQueryLastLocalDateTime); @@ -82,10 +93,11 @@ public void run() { } } lastLogged = currentLogged; + consecutiveExceptionCount = 0; } - Thread.sleep(logInterval * 1000); } catch (Exception e) { - throw new RuntimeException(e); + consecutiveExceptionCount++; + LOGGER.warn("LogsExporter.processMetric exception:" + e); } } } diff --git a/src/main/java/oracle/observability/metrics/MetricsExporter.java b/src/main/java/oracle/observability/metrics/MetricsExporter.java index 5ba606e..5eb87e1 100644 --- a/src/main/java/oracle/observability/metrics/MetricsExporter.java +++ b/src/main/java/oracle/observability/metrics/MetricsExporter.java @@ -24,13 +24,19 @@ @RestController public class MetricsExporter extends ObservabilityExporter { + private static final Logger LOGGER = LoggerFactory.getLogger(MetricsExporter.class); + public static final String UP = "up"; + public static final String METRICSTYPE = "metricstype"; + public static final String METRICSDESC = "metricsdesc"; + public static final String LABELS = "labels"; + public static final String IGNOREZERORESULT = "ignorezeroresult"; + public static final String FALSE = "false"; public String LISTEN_ADDRESS = System.getenv("LISTEN_ADDRESS"); // ":9161" public String TELEMETRY_PATH = System.getenv("TELEMETRY_PATH"); // "/metrics" //Interval between each scrape. Default is to scrape on collect requests. 
scrape.interval public String SCRAPE_INTERVAL = System.getenv("scrape.interval"); // "0s" public static final String ORACLEDB_METRIC_PREFIX = "oracledb_"; Map gaugeMap = new HashMap<>(); - private static final Logger LOG = LoggerFactory.getLogger(MetricsExporter.class); /** * The endpoint that prometheus will scrape @@ -54,7 +60,7 @@ private void processMetrics() throws IOException, SQLException { JsonNode jsonNode = mapper.readerFor(MetricsExporterConfigEntry.class).readTree(new FileInputStream(tomlfile)); JsonNode metric = jsonNode.get("metric"); if(metric == null || metric.isEmpty()) { - LOG.info("No logs records configured"); + LOGGER.info("No logs records configured"); return; } Iterator metrics = metric.iterator(); @@ -65,11 +71,11 @@ private void processMetrics() throws IOException, SQLException { processMetric(connection, metrics); } } finally { - Gauge gauge = gaugeMap.get(ORACLEDB_METRIC_PREFIX + "up"); + Gauge gauge = gaugeMap.get(ORACLEDB_METRIC_PREFIX + UP); if (gauge == null) { - Gauge upgauge = Gauge.build().name(ORACLEDB_METRIC_PREFIX + "up").help("Whether the Oracle database server is up.").register(); + Gauge upgauge = Gauge.build().name(ORACLEDB_METRIC_PREFIX + UP).help("Whether the Oracle database server is up.").register(); upgauge.set(isConnectionSuccessful); - gaugeMap.put(ORACLEDB_METRIC_PREFIX + "up", upgauge); + gaugeMap.put(ORACLEDB_METRIC_PREFIX + UP, upgauge); } else gauge.set(isConnectionSuccessful); } } @@ -87,10 +93,9 @@ private void processMetrics() throws IOException, SQLException { */ private void processMetric(Connection connection, Iterator metric) { JsonNode next = metric.next(); - //todo ignore case - String context = next.get("context").asText(); // eg context = "teq" - String metricsType = next.get("metricstype") == null ? "" :next.get("metricstype").asText(); - JsonNode metricsdescNode = next.get("metricsdesc"); + String context = next.get(CONTEXT).asText(); // eg context = "teq" + String metricsType = next.get(METRICSTYPE) == null ? "" :next.get(METRICSTYPE).asText(); + JsonNode metricsdescNode = next.get(METRICSDESC); // eg metricsdesc = { enqueued_msgs = "Total enqueued messages.", dequeued_msgs = "Total dequeued messages.", remained_msgs = "Total remained messages."} Iterator> metricsdescIterator = metricsdescNode.fields(); Map metricsDescMap = new HashMap<>(); @@ -98,19 +103,19 @@ private void processMetric(Connection connection, Iterator metric) { Map.Entry metricsdesc = metricsdescIterator.next(); metricsDescMap.put(metricsdesc.getKey(), metricsdesc.getValue().asText()); } - LOG.debug("context:" + context); + LOGGER.debug("context:" + context); String[] labelNames = new String[0]; - if (next.get("labels") != null) { - int size = next.get("labels").size(); - Iterator labelIterator = next.get("labels").iterator(); + if (next.get(LABELS) != null) { + int size = next.get(LABELS).size(); + Iterator labelIterator = next.get(LABELS).iterator(); labelNames = new String[size]; for (int i = 0; i < size; i++) { labelNames[i] = labelIterator.next().asText(); } - LOG.debug("\n"); + LOGGER.debug("\n"); } - String request = next.get("request").asText(); // the sql query - String ignorezeroresult = next.get("ignorezeroresult") == null ? "false" : next.get("ignorezeroresult").asText(); //todo, currently defaults to true + String request = next.get(REQUEST).asText(); // the sql query + String ignorezeroresult = next.get(IGNOREZERORESULT) == null ? 
FALSE : next.get(IGNOREZERORESULT).asText(); //todo, currently defaults to true ResultSet resultSet; try { resultSet = connection.prepareStatement(request).executeQuery(); @@ -118,7 +123,7 @@ private void processMetric(Connection connection, Iterator metric) { translateQueryToPrometheusMetric(context, metricsDescMap, labelNames, resultSet); } } catch(SQLException e) { //this can be due to table not existing etc. - LOG.warn("DBMetricsExporter.processMetric during:" + request + " exception:" + e); + LOGGER.warn("MetricsExporter.processMetric during:" + request + " exception:" + e); return; } } @@ -127,7 +132,7 @@ private void translateQueryToPrometheusMetric(String context, Map sqlQueryResults = + Map sqlQueryResults = extractGaugesAndLabelValues(context, metricsDescMap, labelNames, resultSet, labelValues, resultSet.getMetaData().getColumnCount()); setLabelValues(context, labelNames, labelValues, sqlQueryResults.entrySet().iterator()); } @@ -142,10 +147,10 @@ private void translateQueryToPrometheusMetric(String context, Map extractGaugesAndLabelValues( + private Map extractGaugesAndLabelValues( String context, Map metricsDescMap, String[] labelNames, ResultSet resultSet, String[] labelValues, int columnCount) throws SQLException { - Map sqlQueryResults = new HashMap<>(); + Map sqlQueryResults = new HashMap<>(); String columnName; String columnTypeName; for (int i = 0; i < columnCount; i++) { //for each column... @@ -154,9 +159,9 @@ private Map extractGaugesAndLabelValues( if (columnTypeName.equals("VARCHAR2")) //. typename is 2/NUMBER or 12/VARCHAR2 ; else - sqlQueryResults.put(resultSet.getMetaData().getColumnName(i + 1), resultSet.getInt(i + 1)); + sqlQueryResults.put(resultSet.getMetaData().getColumnName(i + 1), resultSet.getLong(i + 1)); String gaugeName = ORACLEDB_METRIC_PREFIX + context + "_" + columnName; - LOG.debug("---gaugeName:" + gaugeName); + LOGGER.debug("---gaugeName:" + gaugeName); Gauge gauge = gaugeMap.get(gaugeName); if (gauge == null) { if(metricsDescMap.containsKey(columnName)) { @@ -173,9 +178,9 @@ private Map extractGaugesAndLabelValues( return sqlQueryResults; } - private void setLabelValues(String context, String[] labelNames, String[] labelValues, Iterator> sqlQueryRestulsEntryIterator) { + private void setLabelValues(String context, String[] labelNames, String[] labelValues, Iterator> sqlQueryRestulsEntryIterator) { while(sqlQueryRestulsEntryIterator.hasNext()) { //for each column - Map.Entry sqlQueryResultsEntry = sqlQueryRestulsEntryIterator.next(); + Map.Entry sqlQueryResultsEntry = sqlQueryRestulsEntryIterator.next(); boolean isLabel = false; for (int ii = 0; ii< labelNames.length; ii++) { if(labelNames[ii].equals(sqlQueryResultsEntry.getKey())) isLabel =true; // continue @@ -186,7 +191,7 @@ private void setLabelValues(String context, String[] labelNames, String[] labelV try { gaugeMap.get(ORACLEDB_METRIC_PREFIX + context + "_" + sqlQueryResultsEntry.getKey().toLowerCase()).labels(labelValues).set(valueToSet); } catch (Exception ex) { //todo filter to avoid unnecessary exception handling - LOG.debug("OracleDBMetricsExporter.translateQueryToPrometheusMetric Exc:" + ex); + LOGGER.debug("OracleDBMetricsExporter.translateQueryToPrometheusMetric Exc:" + ex); } else gaugeMap.get(ORACLEDB_METRIC_PREFIX + context + "_" + sqlQueryResultsEntry.getKey().toLowerCase()).set(valueToSet); } diff --git a/src/main/java/oracle/observability/tracing/TracingExporter.java b/src/main/java/oracle/observability/tracing/TracingExporter.java index 9133aff..c943ef2 100644 --- 
a/src/main/java/oracle/observability/tracing/TracingExporter.java +++ b/src/main/java/oracle/observability/tracing/TracingExporter.java @@ -45,10 +45,11 @@ @RestController public final class TracingExporter extends ObservabilityExporter implements Runnable { - private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(TracingExporter.class); + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(TracingExporter.class); + public static final String ECID = "ECID"; public static final String ECID_BIND_VALUES = "ECID_BIND_VALUES"; private static final String ECID_BIND_VALUES_GETSQLID_SQL = - "select ECID, SQL_ID from GV$SESSION where ECID IS NOT NULL"; + "select ECID, SQL_ID from GV$ACTIVE_SESSION_HISTORY where ECID IS NOT NULL"; private static final String ECID_BIND_VALUES_GETBINDING_SQL = "SELECT sql_id, t.sql_text sql_text, b.name bind_name, b.value_string bind_value " + "FROM gv$sql t " + @@ -56,9 +57,15 @@ public final class TracingExporter extends ObservabilityExporter implements Runn "WHERE b.value_string is not null AND sql_id = ? "; public static final String OTEL_JAEGER_ORACLEDBTRACER = "otel-jaeger-oracledbtracer"; public static final String HTTP_JAEGER_COLLECTOR_MSDATAWORKSHOP_14268 = "http://jaeger-collector.msdataworkshop:14268"; //default + public static final String TEMPLATE = "template"; + public static final String SQL_ID = "SQL_ID"; + public static final String SQL_TEXT = "sql_text"; + public static final String BIND_VALUE = "bind_value"; + public static final String ORACLEDB_TracingExporter = "oracledb_TracingExporter_"; public String TRACE_COLLECTOR_ADDRESS = System.getenv("TRACE_COLLECTOR_ADDRESS"); // "http://jaeger-collector.msdataworkshop:14268" "http://localhost:14250" public String TRACE_INTERVAL = System.getenv("TRACE_INTERVAL"); // "30s" - private int traceInterval = 30; + private int traceInterval; + private int traceIntervalDefault = 30; private static OpenTelemetry openTelemetry; private static Tracer tracer; public static TextMapPropagator TEXT_MAP_PROPAGATOR; @@ -74,10 +81,11 @@ public void init() { @Override public void run() { - LOG.debug("TraceExporter DEFAULT_METRICS:" + DEFAULT_METRICS); + LOGGER.debug("TracingExporter DEFAULT_METRICS:" + DEFAULT_METRICS); if (TRACE_INTERVAL != null && !TRACE_INTERVAL.trim().equals("")) traceInterval = Integer.getInteger(TRACE_INTERVAL); - LOG.debug("TraceExporter traceInterval:" + traceInterval); + else traceInterval = traceIntervalDefault; + LOGGER.debug("TracingExporter traceInterval:" + traceInterval); File tomlfile = new File(DEFAULT_METRICS); TomlMapper mapper = new TomlMapper(); JsonNode jsonNode; @@ -88,27 +96,26 @@ public void run() { } JsonNode trace = jsonNode.get("trace"); if (trace == null || trace.isEmpty()) { - LOG.info("No trace records configured"); + LOGGER.info("No trace records configured"); return; } Iterator traces = trace.iterator(); if(!traces.hasNext()) return; while (true) { try (Connection connection = getPoolDataSource().getConnection()) { - while (traces.hasNext()) { //for each "log" entry in toml/config... 
+ while (traces.hasNext()) { JsonNode next = traces.next(); - String context = next.get("context").asText(); // the sql query - String request = next.get("request").asText(); // the sql query - String template = next.get("template").asText(); // the sql query - LOG.debug("DBTracingExporter.request:" + request); - LOG.debug("DBTracingExporter.template:" + template); + String context = next.get(CONTEXT).asText(); + String request = next.get(REQUEST).asText(); + String template = next.get(TEMPLATE).asText(); + LOGGER.debug("TracingExporter request:" + request); if (template != null && template.equals(ECID_BIND_VALUES)) { ecidTraces(connection, context); } } Thread.sleep(traceInterval * 1000); } catch (Exception e) { - throw new RuntimeException(e); + LOGGER.warn("TracingExporter.processMetric exception:" + e); } } } @@ -116,42 +123,45 @@ public void run() { void ecidTraces(Connection connection, String configContextName) throws SQLException { PreparedStatement preparedStatement = connection.prepareStatement(ECID_BIND_VALUES_GETSQLID_SQL); ResultSet rs = preparedStatement.executeQuery(); - while (rs.next()) { - String traceparent = rs.getString("ECID"); - String SQL_ID = rs.getString("SQL_ID"); +// while (rs.next()) { + rs.next(); + String traceparent = rs.getString(ECID); + LOGGER.debug("TracingExporter traceparent:" + traceparent); + String sqlID = rs.getString(SQL_ID); String getbindingSQL = ECID_BIND_VALUES_GETBINDING_SQL; PreparedStatement sqlTextPS = connection.prepareStatement(getbindingSQL); - sqlTextPS.setString(1, SQL_ID); + sqlTextPS.setString(1, sqlID); ResultSet sqlTextPSrs = sqlTextPS.executeQuery(); - String SQL_TEXT = ""; - String SQL_BIND = ""; + String sqlText = ""; + String sqlBind = ""; while (sqlTextPSrs.next()) { - SQL_TEXT = sqlTextPSrs.getString("sql_text"); - SQL_BIND = sqlTextPSrs.getString("bind_value"); + sqlText = sqlTextPSrs.getString(SQL_TEXT); + sqlBind = sqlTextPSrs.getString(BIND_VALUE); } - if (!processedTraces.contains(traceparent)) { - LOG.debug("processing ecid/traceparent:" + traceparent); - LOG.debug("processing SQL_ID:" + SQL_ID); - LOG.debug("processing SQL_TEXT:" + SQL_TEXT); - LOG.debug("processing SQL_BIND:" + SQL_BIND); + if (!processedTraces.contains(traceparent)) { //todo check contents as well + LOGGER.debug("processing ecid/traceparent:" + traceparent); + LOGGER.debug("processing SQL_ID:" + sqlID); + LOGGER.debug("processing SQL_TEXT:" + sqlText); + LOGGER.debug("processing SQL_BIND:" + sqlBind); Context context = TEXT_MAP_PROPAGATOR.extract(Context.current(), null, getTextMapGetter(traceparent)); - LOG.debug("context:" + context); + LOGGER.debug("context:" + context); Span childSpan = - tracer.spanBuilder("oracledb_traceexporter_" + configContextName).setParent(context).setSpanKind(SpanKind.SERVER).startSpan(); - LOG.debug("childSpan:" + childSpan); + tracer.spanBuilder(ORACLEDB_TracingExporter + configContextName) + .setParent(context).setSpanKind(SpanKind.SERVER).startSpan(); + LOGGER.debug("childSpan:" + childSpan); try (Scope scope = childSpan.makeCurrent()) { - childSpan.setAttribute("SQL_ID", SQL_ID); - childSpan.setAttribute("SQL_TEXT", SQL_TEXT); - childSpan.setAttribute("SQL_BIND", SQL_BIND); - childSpan.addEvent("SQL_ID:" + SQL_ID); - childSpan.addEvent("SQL_TEXT:" + SQL_TEXT); - childSpan.addEvent("SQL_BIND:" + SQL_BIND); + childSpan.setAttribute(SQL_ID, sqlID); + childSpan.setAttribute("SQL_TEXT", sqlText); + childSpan.setAttribute("SQL_BIND", sqlBind); + childSpan.addEvent("SQL_ID:" + sqlID); + childSpan.addEvent("SQL_TEXT:" + 
sqlText); + childSpan.addEvent("SQL_BIND:" + sqlBind); processedTraces.add(traceparent); } finally { childSpan.end(); } } - } +// } } private TextMapGetter getTextMapGetter(String traceparent) { @@ -171,6 +181,7 @@ public String get(HttpExchange carrier, String key) { private OpenTelemetry initOpenTelemetry() { String traceCollectorAddress = TRACE_COLLECTOR_ADDRESS == null || TRACE_COLLECTOR_ADDRESS.trim().equals("") ? HTTP_JAEGER_COLLECTOR_MSDATAWORKSHOP_14268 :TRACE_COLLECTOR_ADDRESS; + LOGGER.warn("TracingExporter traceCollectorAddress:" + traceCollectorAddress); JaegerGrpcSpanExporter jaegerExporter = JaegerGrpcSpanExporter.builder() .setEndpoint(traceCollectorAddress) From 574e61a79b329a9a9c5a302bcb12159847a525b6 Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Sun, 3 Jul 2022 17:51:32 -0400 Subject: [PATCH 4/8] README.md updates --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 9957d61..c48ec6f 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ # Unified Observability with Oracle Database -This v1 (preview) distribution contains scripts and code for exporting metrics, logs, and traces from any Oracle Database to provide converged observability for data-centric applications. +This distribution contains scripts and code for exporting metrics, logs, and traces from any Oracle Database to provide converged observability for data-centric applications. Metrics from the application layer, Kubernetes, and Oracle Database can be combined to provide unified observability to developers within a single Grafana console. -All three exporters (metrics, log, and trace) can be configured in the same file and each is explanined in the corresponding doc pages: +All three exporters (metrics, log, and trace) can be configured in the same file and each is explained in the corresponding doc pages: [Metrics Exporter][Metrics Exporter] @@ -15,7 +15,7 @@ All three exporters (metrics, log, and trace) can be configured in the same file The old version of the metrics exporter can be found in the [old implementation branch][old implementation branch] and the new metrics exporter implementation is backward compatible such that the same configuration for both database connection and metrics definition can be used. -Users are encouraged to open issues and enhancements requests against this github repos and feel free to ask any questions. We will actively work on them as we will the development of the exporters. +Users are encouraged to open issues and enhancements requests against this repos and feel free to ask any questions. We will actively work on them as we will the development of the exporters. ### Build From dafa3552f351c3b44da84f39e921a28ec6f97e8e Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Sun, 3 Jul 2022 17:56:48 -0400 Subject: [PATCH 5/8] README.md updates --- README.md | 6 +++++- build-and-push.sh | 38 -------------------------------------- 2 files changed, 5 insertions(+), 39 deletions(-) delete mode 100755 build-and-push.sh diff --git a/README.md b/README.md index c48ec6f..c51d98b 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,11 @@ All three exporters (metrics, log, and trace) can be configured in the same file The old version of the metrics exporter can be found in the [old implementation branch][old implementation branch] and the new metrics exporter implementation is backward compatible such that the same configuration for both database connection and metrics definition can be used. 
-Users are encouraged to open issues and enhancements requests against this repos and feel free to ask any questions. We will actively work on them as we will the development of the exporters.
+Users are encouraged to open issues and enhancement requests against this repo (https://github.com/oracle/oracle-db-appdev-monitoring/issues) and to ask any questions. We will actively work on them as we continue development of the exporters.
+
+### Pull
+
+The image can be pulled via `docker pull container-registry.oracle.com/database/observability-exporter:0.1.0`.

 ### Build

diff --git a/build-and-push.sh b/build-and-push.sh
deleted file mode 100755
index 90437b3..0000000
--- a/build-and-push.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-## Copyright (c) 2022 Oracle and/or its affiliates.
-## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
-
-
-export IMAGE_NAME=observability-exporter
-export IMAGE_VERSION=0.1.0
-
-if [ -z "$DOCKER_REGISTRY" ]; then
-    echo "DOCKER_REGISTRY not set."
-fi
-
-export IMAGE=${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_VERSION}
-
-mvn clean package -DskipTests
-docker build -t=$IMAGE .
-
-export IS_CREATE_REPOS=$1
-if [ -z "IS_CREATE_REPOS" ]; then
-    echo "not creating OCIR repos"
-else
-    echo "creating OCIR repos and setting to public"
-    if [ -z "COMPARTMENT_OCID" ]; then
-        echo "COMPARTMENT_OCID not set. Will get it with state_get"
-        export COMPARTMENT_OCID=$(state_get COMPARTMENT_OCID)
-    fi
-    if [ -z "RUN_NAME" ]; then
-        echo "RUN_NAME not set. Will get it with state_get"
-        export RUN_NAME=$(state_get RUN_NAME)
-    fi
-#    RUN_NAME is randomly generated name from workshop, eg gd4930131
-    oci artifacts container repository create --compartment-id "$COMPARTMENT_OCID" --display-name "$RUN_NAME/$IMAGE_NAME" --is-public true
-fi
-
-docker push "$IMAGE"
-if [ $? -eq 0 ]; then
-    docker rmi "$IMAGE"
-fi

From 72a2c4a0d729205d611e3ea40522c004ec6c8220 Mon Sep 17 00:00:00 2001
From: Paul Parkinson
Date: Wed, 3 Aug 2022 19:06:55 -0400
Subject: [PATCH 6/8] basic example

---
 examples/README.md                            | 17 +++++
 ...ied-observability-exporter-deployment.yaml | 49 +++++++++++++++++++
 ...nified-observability-exporter-service.yaml | 14 ++++++
 ...observability-exporter-servicemonitor.yaml | 14 ++++++
 ...-redeploy-unified-observabiity-exporter.sh | 17 +++++++
 5 files changed, 111 insertions(+)
 create mode 100644 examples/unified-observability-exporter-deployment.yaml
 create mode 100644 examples/unified-observability-exporter-service.yaml
 create mode 100644 examples/unified-observability-exporter-servicemonitor.yaml
 create mode 100755 examples/update-and-redeploy-unified-observabiity-exporter.sh

diff --git a/examples/README.md b/examples/README.md
index ae4198a..319745d 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -3,4 +3,21 @@

 Please refer to the Unified Observability in Grafana with converged Oracle Database Workshop at http://bit.ly/unifiedobservability and its corresponding repo https://github.com/oracle/microservices-datadriven/tree/main/grabdish/observability/db-metrics-exporter for complete examples.

+A simple setup in Kubernetes involves the following steps (with the assumption that Prometheus is already installed):
+
+1. Change the %EXPORTER_NAME% value in all yaml files in this directory. This can be any value such as "helloworld".
+
+2. Change the database connection information in the unified-observability-exporter-deployment.yaml file.
+   - The only value required is the DATA_SOURCE_NAME, which takes the format `USER/PASSWORD@DB_SERVICE_URL`
+   - In the example, the connection information is obtained from a mount of the wallet stored in a Kubernetes secret named `%db-wallet-secret%`
+   - In the example, the password is obtained from a Kubernetes secret named `dbuser`
+
+3. Copy a config file to unified-observability-%EXPORTER_NAME%-exporter-metrics.toml in the current directory
+   - E.g., `cp ../metrics/aq-metrics.toml unified-observability-helloworld-exporter-metrics.toml`
+   - This will be used to create a configmap that is referenced in the deployment.
+
+4. Run `./update-and-redeploy-unified-observabiity-exporter.sh`
+
+5. You should see metrics being exported from within the container at http://localhost:9161/metrics and likewise from the Kubernetes service at http://unified-observability-exporter-service-%EXPORTER_NAME%:9161/metrics
+
 More examples will be provided here in the near future.
diff --git a/examples/unified-observability-exporter-deployment.yaml b/examples/unified-observability-exporter-deployment.yaml
new file mode 100644
index 0000000..02ff0c5
--- /dev/null
+++ b/examples/unified-observability-exporter-deployment.yaml
@@ -0,0 +1,49 @@
+## Copyright (c) 2021 Oracle and/or its affiliates.
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: unified-observability-exporter-%EXPORTER_NAME%
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: unified-observability-exporter-%EXPORTER_NAME%
+  template:
+    metadata:
+      labels:
+        app: unified-observability-exporter-%EXPORTER_NAME%
+    spec:
+      containers:
+      - name: unified-observability-exporter-%EXPORTER_NAME%
+        image: container-registry.oracle.com/database/observability-exporter:0.1.0
+        imagePullPolicy: Always
+        env:
+          - name: DEFAULT_METRICS
+            value: /observability/unified-observability-%EXPORTER_NAME%-exporter-metrics.toml
+          - name: TNS_ADMIN
+            value: "/creds"
+          - name: dbpassword
+            valueFrom:
+              secretKeyRef:
+                name: dbuser
+                key: dbpassword
+                optional: true
+          - name: DATA_SOURCE_NAME
+            value: "%USER%/$(dbpassword)@%PDB_NAME%_tp"
+        volumeMounts:
+        - name: creds
+          mountPath: /creds
+        - name: config-volume
+          mountPath: /observability/unified-observability-%EXPORTER_NAME%-exporter-metrics.toml
+          subPath: unified-observability-%EXPORTER_NAME%-exporter-metrics.toml
+        ports:
+        - containerPort: 8080
+      restartPolicy: Always
+      volumes:
+      - name: creds
+        secret:
+          secretName: %db-wallet-secret%
+      - name: config-volume
+        configMap:
+          name: unified-observability-%EXPORTER_NAME%-exporter-config
\ No newline at end of file
diff --git a/examples/unified-observability-exporter-service.yaml b/examples/unified-observability-exporter-service.yaml
new file mode 100644
index 0000000..cc03655
--- /dev/null
+++ b/examples/unified-observability-exporter-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: unified-observability-exporter-service-%EXPORTER_NAME%
+  labels:
+    app: unified-observability-exporter-%EXPORTER_NAME%
+    release: stable
+spec:
+  type: NodePort
+  ports:
+    - port: 9161
+      name: metrics
+  selector:
+    app: unified-observability-exporter-%EXPORTER_NAME%
diff --git a/examples/unified-observability-exporter-servicemonitor.yaml b/examples/unified-observability-exporter-servicemonitor.yaml
new file mode 100644
index 0000000..2d44b96
--- /dev/null
+++ b/examples/unified-observability-exporter-servicemonitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: prometheus-unified-observability-exporter-%EXPORTER_NAME%
+  labels:
+    app: unified-observability-exporter-%EXPORTER_NAME%
+    release: stable
+spec:
+  endpoints:
+  - interval: 20s
+    port: metrics
+  selector:
+    matchLabels:
+      app: unified-observability-exporter-%EXPORTER_NAME%
\ No newline at end of file
diff --git a/examples/update-and-redeploy-unified-observabiity-exporter.sh b/examples/update-and-redeploy-unified-observabiity-exporter.sh
new file mode 100755
index 0000000..5c6795e
--- /dev/null
+++ b/examples/update-and-redeploy-unified-observabiity-exporter.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+## Copyright (c) 2021 Oracle and/or its affiliates.
+## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+
+# add namespace if/as appropriate, eg `kubectl apply -f unified-observability-exporter-deployment.yaml -n mynamespace`
+
+echo delete previous deployment so that deployment is reapplied/deployed after configmap changes for exporter are made...
+kubectl delete deployment db-metrics-exporter-orderpdb
+
+echo create configmap for unified-observability-exporter...
+kubectl delete configmap unified-observability-exporter-config
+kubectl create configmap unified-observability-exporter-config --from-file=unified-observability-%EXPORTER_NAME%-exporter-metrics.toml
+
+kubectl apply -f unified-observability-exporter-deployment.yaml
+# the following are unnecessary after initial deploy but in order to keep to a single bash script...
+kubectl apply -f unified-observability-exporter-service.yaml
+kubectl apply -f unified-observability-exporter-servicemonitor.yaml

From 9e1649a7cae15ea347c147b6f5ecc6654ab900de Mon Sep 17 00:00:00 2001
From: Paul Parkinson
Date: Thu, 27 Apr 2023 19:09:00 -0400
Subject: [PATCH 7/8] adds... 1. ability to specify DATA_SOURCE_USER, DATA_SOURCE_PASSWORD, and DATA_SOURCE_SERVICENAME individually rather than as one DATA_SOURCE_NAME entry (thus more secure) 2. multi-database support via "/scrape" endpoint with "target" param taking the dsn for a specific database (this will soon be enhanced so that it is instead configurable, again to be more secure). Also, updated POM entries for 21.7 version.

---
 build.sh                                      |  1 +
 examples/metrics/default-metrics.toml         |  6 ++
 pom.xml                                       | 26 +++++++-
 .../observability/ObservabilityExporter.java  | 62 +++++++++++++----
 .../metrics/MetricsExporter.java              | 57 ++++++---------
 5 files changed, 108 insertions(+), 44 deletions(-)
 create mode 100644 examples/metrics/default-metrics.toml

diff --git a/build.sh b/build.sh
index e7ccba2..41c5f0e 100755
--- a/build.sh
+++ b/build.sh
@@ -15,3 +15,4 @@ export IMAGE=${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_VERSION}

 mvn clean package -DskipTests
 docker build -t=$IMAGE .
+docker push $IMAGE
diff --git a/examples/metrics/default-metrics.toml b/examples/metrics/default-metrics.toml
new file mode 100644
index 0000000..b7d8f7f
--- /dev/null
+++ b/examples/metrics/default-metrics.toml
@@ -0,0 +1,6 @@
+[[metric]]
+context = "sessions"
+labels = ["inst_id", "status", "type"]
+metricsdesc = { value = "Gauge metric with count of sessions by status and type." 
} +request = "select inst_id, status, type, count(*) as value from gv$session group by status, type, inst_id" +ignorezeroresult = true \ No newline at end of file diff --git a/pom.xml b/pom.xml index 49fa829..e6c9743 100644 --- a/pom.xml +++ b/pom.xml @@ -15,6 +15,7 @@ Exporter for metrics, logs, and tracing from Oracle database 11 + 21.7.0.0 @@ -28,9 +29,28 @@ com.oracle.database.jdbc - ojdbc11-production - 21.3.0.0 - pom + ojdbc11 + ${oracle.jdbc.version} + + + com.oracle.database.jdbc + ucp + ${oracle.jdbc.version} + + + com.oracle.database.security + oraclepki + ${oracle.jdbc.version} + + + com.oracle.database.security + osdt_core + ${oracle.jdbc.version} + + + com.oracle.database.security + osdt_cert + ${oracle.jdbc.version} io.opentelemetry diff --git a/src/main/java/oracle/observability/ObservabilityExporter.java b/src/main/java/oracle/observability/ObservabilityExporter.java index f0209c2..4bf200f 100644 --- a/src/main/java/oracle/observability/ObservabilityExporter.java +++ b/src/main/java/oracle/observability/ObservabilityExporter.java @@ -6,21 +6,32 @@ import com.oracle.bmc.secrets.model.Base64SecretBundleContentDetails; import com.oracle.bmc.secrets.requests.GetSecretBundleRequest; import com.oracle.bmc.secrets.responses.GetSecretBundleResponse; +import oracle.observability.metrics.MetricsExporter; import oracle.ucp.jdbc.PoolDataSource; import oracle.ucp.jdbc.PoolDataSourceFactory; import org.apache.commons.codec.binary.Base64; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.File; import java.io.IOException; import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; public class ObservabilityExporter { + private static final Logger LOGGER = LoggerFactory.getLogger(ObservabilityExporter.class); public String DEFAULT_METRICS = System.getenv("DEFAULT_METRICS"); // "default-metrics.toml" + public File DEFAULT_METRICS_FILE; public String CUSTOM_METRICS = System.getenv("CUSTOM_METRICS"); // public String QUERY_TIMEOUT = System.getenv("QUERY_TIMEOUT"); // "5" public String DATABASE_MAXIDLECONNS = System.getenv("DATABASE_MAXIDLECONNS"); // "0" public String DATABASE_MAXOPENCONNS = System.getenv("DATABASE_MAXOPENCONNS"); // "10" - public String DATA_SOURCE_NAME = System.getenv("DATA_SOURCE_NAME"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + public static String DATA_SOURCE_NAME = System.getenv("DATA_SOURCE_NAME"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + public static String DATA_SOURCE_USER = System.getenv("DATA_SOURCE_USER"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + public static String DATA_SOURCE_PASSWORD = System.getenv("DATA_SOURCE_PASSWORD"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + public static String DATA_SOURCE_SERVICENAME = System.getenv("DATA_SOURCE_SERVICENAME"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp public String TNS_ADMIN = System.getenv("TNS_ADMIN"); //eg /msdataworkshop/creds public String OCI_REGION = System.getenv("OCI_REGION"); //eg us-ashburn-1 public String VAULT_SECRET_OCID = System.getenv("VAULT_SECRET_OCID"); //eg ocid.... 
@@ -29,27 +40,52 @@ public class ObservabilityExporter { public static final String CONTEXT = "context"; public static final String REQUEST = "request"; + static { + + if (DATA_SOURCE_USER != null && DATA_SOURCE_PASSWORD != null && DATA_SOURCE_SERVICENAME != null) { + DATA_SOURCE_NAME = DATA_SOURCE_USER + "/" + DATA_SOURCE_PASSWORD + "@" + DATA_SOURCE_SERVICENAME; + LOGGER.info("DATA_SOURCE_NAME = DATA_SOURCE_USER + \"/\" + DATA_SOURCE_PASSWORD + \"@\" + DATA_SOURCE_SERVICENAME"); + //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + } + } PoolDataSource observabilityDB; + Map dataSourceNameToDataSourceMap = new HashMap<>(); + public PoolDataSource getPoolDataSource() throws SQLException { - if (observabilityDB != null) return observabilityDB; - observabilityDB = PoolDataSourceFactory.getPoolDataSource(); - observabilityDB.setConnectionFactoryClassName("oracle.jdbc.pool.OracleDataSource"); - String user = DATA_SOURCE_NAME.substring(0, DATA_SOURCE_NAME.indexOf("/")); - String pw = DATA_SOURCE_NAME.substring(DATA_SOURCE_NAME.indexOf("/") + 1, DATA_SOURCE_NAME.indexOf("@")); - String serviceName = DATA_SOURCE_NAME.substring(DATA_SOURCE_NAME.indexOf("@") + 1); + return getPoolDataSource(DATA_SOURCE_NAME); + } + public PoolDataSource getPoolDataSource(String dataSourceName) throws SQLException { + if (dataSourceName.equals(DATA_SOURCE_NAME)) { + if (observabilityDB != null) return observabilityDB; + return observabilityDB = getDataSource(DATA_SOURCE_NAME); + } else { + if(dataSourceNameToDataSourceMap.containsKey(dataSourceName) && dataSourceNameToDataSourceMap.get(dataSourceName) != null) + return dataSourceNameToDataSourceMap.get(dataSourceName); + PoolDataSource poolDataSource = getDataSource(dataSourceName); + dataSourceNameToDataSourceMap.put(dataSourceName, poolDataSource); + return poolDataSource; + } + } + + private PoolDataSource getDataSource(String dataSourceName) throws SQLException { + PoolDataSource poolDataSource = PoolDataSourceFactory.getPoolDataSource(); + poolDataSource.setConnectionFactoryClassName("oracle.jdbc.pool.OracleDataSource"); + String user = dataSourceName.substring(0, dataSourceName.indexOf("/")); + String pw = dataSourceName.substring(dataSourceName.indexOf("/") + 1, dataSourceName.indexOf("@")); + String serviceName = dataSourceName.substring(dataSourceName.indexOf("@") + 1); String url = "jdbc:oracle:thin:@" + serviceName + "?TNS_ADMIN=" + TNS_ADMIN; - observabilityDB.setURL(url); - observabilityDB.setUser(user); - if (VAULT_SECRET_OCID == null || VAULT_SECRET_OCID.trim().equals("")) { - observabilityDB.setPassword(pw); + poolDataSource.setURL(url); + poolDataSource.setUser(user); + if (VAULT_SECRET_OCID == null || VAULT_SECRET_OCID.trim().equals("") || !dataSourceName.equals(DATA_SOURCE_NAME)) { + poolDataSource.setPassword(pw); } else { try { - observabilityDB.setPassword(getPasswordFromVault()); + poolDataSource.setPassword(getPasswordFromVault()); } catch (IOException e) { throw new SQLException(e); } } - return observabilityDB; + return poolDataSource; } diff --git a/src/main/java/oracle/observability/metrics/MetricsExporter.java b/src/main/java/oracle/observability/metrics/MetricsExporter.java index 5eb87e1..967273b 100644 --- a/src/main/java/oracle/observability/metrics/MetricsExporter.java +++ b/src/main/java/oracle/observability/metrics/MetricsExporter.java @@ -7,6 +7,7 @@ import io.prometheus.client.Gauge; import oracle.observability.ObservabilityExporter; import org.springframework.web.bind.annotation.GetMapping; +import 
org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import org.slf4j.Logger; @@ -31,30 +32,38 @@ public class MetricsExporter extends ObservabilityExporter { public static final String LABELS = "labels"; public static final String IGNOREZERORESULT = "ignorezeroresult"; public static final String FALSE = "false"; - public String LISTEN_ADDRESS = System.getenv("LISTEN_ADDRESS"); // ":9161" - public String TELEMETRY_PATH = System.getenv("TELEMETRY_PATH"); // "/metrics" - //Interval between each scrape. Default is to scrape on collect requests. scrape.interval - public String SCRAPE_INTERVAL = System.getenv("scrape.interval"); // "0s" public static final String ORACLEDB_METRIC_PREFIX = "oracledb_"; Map gaugeMap = new HashMap<>(); + Map dnsToCollectorRegistryMap = new HashMap<>(); + + /** * The endpoint that prometheus will scrape * @return Prometheus metric - * @throws Exception */ @GetMapping(value = "/metrics", produces = "text/plain") public String metrics() throws Exception { - processMetrics(); - return getMetricsString(); + processMetrics(DATA_SOURCE_NAME, CollectorRegistry.defaultRegistry); + return getMetricsString(CollectorRegistry.defaultRegistry); + } + @GetMapping(value = "/scrape", produces = "text/plain") + public String scrape(@RequestParam("target") String target) throws Exception { + CollectorRegistry collectorRegistry = dnsToCollectorRegistryMap.get(target); + if (collectorRegistry == null) { + collectorRegistry = new CollectorRegistry(); + dnsToCollectorRegistryMap.put(target, collectorRegistry); + } + processMetrics(target, dnsToCollectorRegistryMap.get(target)); + return getMetricsString(collectorRegistry); } @PostConstruct public void init() throws Exception { - processMetrics(); + processMetrics(DATA_SOURCE_NAME, CollectorRegistry.defaultRegistry); } - private void processMetrics() throws IOException, SQLException { + private void processMetrics(String datasourceName, CollectorRegistry registry) throws IOException, SQLException { File tomlfile = new File(DEFAULT_METRICS); TomlMapper mapper = new TomlMapper(); JsonNode jsonNode = mapper.readerFor(MetricsExporterConfigEntry.class).readTree(new FileInputStream(tomlfile)); @@ -65,15 +74,15 @@ private void processMetrics() throws IOException, SQLException { } Iterator metrics = metric.iterator(); int isConnectionSuccessful = 0; - try(Connection connection = getPoolDataSource().getConnection()) { + try(Connection connection = getPoolDataSource(datasourceName).getConnection()) { isConnectionSuccessful = 1; while (metrics.hasNext()) { - processMetric(connection, metrics); + processMetric(registry, connection, metrics); } } finally { Gauge gauge = gaugeMap.get(ORACLEDB_METRIC_PREFIX + UP); if (gauge == null) { - Gauge upgauge = Gauge.build().name(ORACLEDB_METRIC_PREFIX + UP).help("Whether the Oracle database server is up.").register(); + Gauge upgauge = Gauge.build().name(ORACLEDB_METRIC_PREFIX + UP).help("Whether the Oracle database server is up.").register(registry); upgauge.set(isConnectionSuccessful); gaugeMap.put(ORACLEDB_METRIC_PREFIX + UP, upgauge); } else gauge.set(isConnectionSuccessful); @@ -91,7 +100,7 @@ private void processMetrics() throws IOException, SQLException { * Request string * IgnoreZeroResult bool */ - private void processMetric(Connection connection, Iterator metric) { + private void processMetric(CollectorRegistry registry, Connection connection, Iterator metric) { JsonNode next = metric.next(); String context = next.get(CONTEXT).asText(); 
// eg context = "teq" String metricsType = next.get(METRICSTYPE) == null ? "" :next.get(METRICSTYPE).asText(); @@ -120,7 +129,7 @@ private void processMetric(Connection connection, Iterator metric) { try { resultSet = connection.prepareStatement(request).executeQuery(); while (resultSet.next()) { - translateQueryToPrometheusMetric(context, metricsDescMap, labelNames, resultSet); + translateQueryToPrometheusMetric(registry, context, metricsDescMap, labelNames, resultSet); } } catch(SQLException e) { //this can be due to table not existing etc. LOGGER.warn("MetricsExporter.processMetric during:" + request + " exception:" + e); @@ -128,26 +137,19 @@ private void processMetric(Connection connection, Iterator metric) { } } - private void translateQueryToPrometheusMetric(String context, Map metricsDescMap, + private void translateQueryToPrometheusMetric(CollectorRegistry registry, String context, Map metricsDescMap, String[] labelNames, ResultSet resultSet) throws SQLException { String[] labelValues = new String[labelNames.length]; Map sqlQueryResults = - extractGaugesAndLabelValues(context, metricsDescMap, labelNames, resultSet, labelValues, resultSet.getMetaData().getColumnCount()); + extractGaugesAndLabelValues(registry, context, metricsDescMap, labelNames, resultSet, labelValues, resultSet.getMetaData().getColumnCount()); setLabelValues(context, labelNames, labelValues, sqlQueryResults.entrySet().iterator()); } /** * Creates Gauges and gets label values - * @param context - * @param metricsDescMap - * @param labelNames - * @param resultSet - * @param labelValues - * @param columnCount - * @throws SQLException */ - private Map extractGaugesAndLabelValues( + private Map extractGaugesAndLabelValues(CollectorRegistry registry, String context, Map metricsDescMap, String[] labelNames, ResultSet resultSet, String[] labelValues, int columnCount) throws SQLException { Map sqlQueryResults = new HashMap<>(); @@ -166,8 +168,8 @@ private Map extractGaugesAndLabelValues( if (gauge == null) { if(metricsDescMap.containsKey(columnName)) { if (labelNames.length > 0) { - gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).labelNames(labelNames).register(); - } else gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).register(); + gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).labelNames(labelNames).register(registry); + } else gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).register(registry); gaugeMap.put(gaugeName, gauge); } } @@ -198,8 +200,7 @@ private void setLabelValues(String context, String[] labelNames, String[] labelV } } - public static String getMetricsString() { - CollectorRegistry collectorRegistry = CollectorRegistry.defaultRegistry; + public static String getMetricsString(CollectorRegistry collectorRegistry) { Enumeration mfs = collectorRegistry.filteredMetricFamilySamples(new HashSet<>()); return compose(mfs); } From 50124f398f69dd7bb07b64a7c7b107f33b4114e1 Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Thu, 4 May 2023 14:53:30 -0400 Subject: [PATCH 8/8] adds multi-datasource support based on configuration which includes OCI Vault support and various fixes. 
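For example, with a datasource named dataSourceMyFirstDB defined in the yaml file
referenced by the MULTI_DATASOURCE_CONFIG environment variable (the name is
illustrative, borrowed from examples/multidatasource_config.yaml below), its
metrics could then be scraped (assuming the exporter is listening on the default
:9161 address) with something like:

    curl http://localhost:9161/scrape?name=dataSourceMyFirstDB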
--- README.md | 24 +++
 examples/metrics/default-metrics.toml | 8 +-
 examples/multidatasource_config.yaml | 22 +++
 pom.xml | 4 +
 .../observability/DataSourceConfig.java | 16 ++
 .../observability/ObservabilityExporter.java | 111 ++++++++----
 .../observability/logs/LogsExporter.java | 1 +
 .../CollectorRegistryWithGaugeMap.java | 12 ++
 .../metrics/MetricsExporter.java | 169 ++++++++++++------
 .../tracing/TracingExporter.java | 1 +
 src/main/resources/application.properties | 1 -
 src/main/resources/application.yaml | 2 +
 12 files changed, 284 insertions(+), 87 deletions(-)
 create mode 100644 examples/multidatasource_config.yaml
 create mode 100644 src/main/java/oracle/observability/DataSourceConfig.java
 create mode 100644 src/main/java/oracle/observability/metrics/CollectorRegistryWithGaugeMap.java
 delete mode 100644 src/main/resources/application.properties
 create mode 100644 src/main/resources/application.yaml

diff --git a/README.md b/README.md
index 066b90e..a1b0d38 100644
--- a/README.md
+++ b/README.md
@@ -98,6 +98,30 @@ OAuth2 https://spring.io/guides/tutorials/spring-boot-oauth2/
 The reader is referred to this material to configure security and other aspects as appropriate.
 
+## Support for Multiple Databases
+
+This feature can be used in two ways:
+
+1. By accessing the `/scrape` endpoint and passing a `target` parameter that is the full dsn value.
+   For example: `http://localhost:9161/scrape?target=<full-dsn>`
+2. By accessing the `/scrapeByName` endpoint and passing a `name` parameter that is the name of the datasource
+   as defined in the yaml file located at the path given by the environment variable `MULTI_DATASOURCE_CONFIG`.
+   The yaml file takes the following form, and an example can be found under `examples/multidatasource_config.yaml`:
+```
+  dataSourceName :
+    serviceName :
+    userName :
+    password :
+    TNS_ADMIN :
+    # if present, OCI Vault is used for password rather than "password" attribute
+    passwordOCID :
+    # the following is applicable only if OCI config file is used rather than instance principals authentication (generally only the case in development)
+    ociConfigFile :
+    ociRegion :
+    ociProfile :
+```
+The feature is currently only applicable to metrics, not tracing or logging.
+The feature currently uses the same global `DEFAULT_METRICS` config for every datasource.
 
 [Metrics Exporter]: Metrics.md
 [Log Exporter]: Logs.md
diff --git a/examples/metrics/default-metrics.toml b/examples/metrics/default-metrics.toml
index b7d8f7f..7aa16dc 100644
--- a/examples/metrics/default-metrics.toml
+++ b/examples/metrics/default-metrics.toml
@@ -3,4 +3,10 @@ context = "sessions"
 labels = ["inst_id", "status", "type"]
 metricsdesc = { value = "Gauge metric with count of sessions by status and type." }
 request = "select inst_id, status, type, count(*) as value from gv$session group by status, type, inst_id"
-ignorezeroresult = true
\ No newline at end of file
+ignorezeroresult = true
+
+[[metric]]
+context = "context_with_labels"
+labels = [ "label_1", "label_2" ]
+request = "SELECT 1 as value_1, 2 as value_2, 'First label' as label_1, 'Second label' as label_2 FROM DUAL"
+metricsdesc = { value_1 = "Simple example returning always 1.", value_2 = "Same but returning always 2."
} \ No newline at end of file diff --git a/examples/multidatasource_config.yaml b/examples/multidatasource_config.yaml new file mode 100644 index 0000000..a4f8aaa --- /dev/null +++ b/examples/multidatasource_config.yaml @@ -0,0 +1,22 @@ +dataSourceMyFirstDB : + serviceName : myservicename1 + userName : myuser1 + password : mypassword1 + TNS_ADMIN : /somefolder/Wallet_somewallet1 + # if present, OCI Vault is used for password rather than "password" attribute +# passwordOCID : + # the following is applicable only if OCI config file is used rather than instance principals authentication (generally only the case in development) +# ociConfigFile : +# ociRegion : +# ociProfile : +dataSourceMySecondDB : + serviceName : myservicename1 + userName : myuser2 + password : mypassword2 + TNS_ADMIN : /somefolder/Wallet_somewallet2 +# # if present, OCI Vault is used for password rather than "password" attribute +# passwordOCID : + # the following is applicable only if OCI config file is used rather than instance principals authentication (generally only the case in development) +# ociConfigFile : +# ociRegion : +# ociProfile : \ No newline at end of file diff --git a/pom.xml b/pom.xml index e6c9743..143e4b7 100644 --- a/pom.xml +++ b/pom.xml @@ -127,6 +127,10 @@ oci-java-sdk-secrets 1.32.2 + + org.projectlombok + lombok + diff --git a/src/main/java/oracle/observability/DataSourceConfig.java b/src/main/java/oracle/observability/DataSourceConfig.java new file mode 100644 index 0000000..1168e71 --- /dev/null +++ b/src/main/java/oracle/observability/DataSourceConfig.java @@ -0,0 +1,16 @@ +package oracle.observability; + +import lombok.Data; + +@Data +public class DataSourceConfig { + private String dataSourceName; + private String serviceName; + private String userName; + private String password; + private String TNS_ADMIN; + private String passwordOCID; + private String ociConfigFile; + private String ociRegion; + private String ociProfile; +} diff --git a/src/main/java/oracle/observability/ObservabilityExporter.java b/src/main/java/oracle/observability/ObservabilityExporter.java index 4bf200f..2cd130d 100644 --- a/src/main/java/oracle/observability/ObservabilityExporter.java +++ b/src/main/java/oracle/observability/ObservabilityExporter.java @@ -6,15 +6,13 @@ import com.oracle.bmc.secrets.model.Base64SecretBundleContentDetails; import com.oracle.bmc.secrets.requests.GetSecretBundleRequest; import com.oracle.bmc.secrets.responses.GetSecretBundleResponse; -import oracle.observability.metrics.MetricsExporter; import oracle.ucp.jdbc.PoolDataSource; import oracle.ucp.jdbc.PoolDataSourceFactory; import org.apache.commons.codec.binary.Base64; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; +import java.io.*; import java.sql.SQLException; import java.util.HashMap; import java.util.Map; @@ -26,61 +24,110 @@ public class ObservabilityExporter { public File DEFAULT_METRICS_FILE; public String CUSTOM_METRICS = System.getenv("CUSTOM_METRICS"); // public String QUERY_TIMEOUT = System.getenv("QUERY_TIMEOUT"); // "5" + public static final String CONTEXT = "context"; + public static final String REQUEST = "request"; + + //Single/global datasource config related.... 
public String DATABASE_MAXIDLECONNS = System.getenv("DATABASE_MAXIDLECONNS"); // "0" public String DATABASE_MAXOPENCONNS = System.getenv("DATABASE_MAXOPENCONNS"); // "10" public static String DATA_SOURCE_NAME = System.getenv("DATA_SOURCE_NAME"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp - public static String DATA_SOURCE_USER = System.getenv("DATA_SOURCE_USER"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp - public static String DATA_SOURCE_PASSWORD = System.getenv("DATA_SOURCE_PASSWORD"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp - public static String DATA_SOURCE_SERVICENAME = System.getenv("DATA_SOURCE_SERVICENAME"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp - public String TNS_ADMIN = System.getenv("TNS_ADMIN"); //eg /msdataworkshop/creds - public String OCI_REGION = System.getenv("OCI_REGION"); //eg us-ashburn-1 - public String VAULT_SECRET_OCID = System.getenv("VAULT_SECRET_OCID"); //eg ocid.... - public String OCI_CONFIG_FILE = System.getenv("OCI_CONFIG_FILE"); //eg "~/.oci/config" - public String OCI_PROFILE = System.getenv("OCI_PROFILE"); //eg "DEFAULT" - public static final String CONTEXT = "context"; - public static final String REQUEST = "request"; + //if all three of the following exist, they are internally concatenated and override/used as DATA_SOURCE_NAME + public static String DATA_SOURCE_USER = System.getenv("DATA_SOURCE_USER"); //eg %USER% + public static String DATA_SOURCE_PASSWORD = System.getenv("DATA_SOURCE_PASSWORD"); //eg $(dbpassword) + public static String DATA_SOURCE_SERVICENAME = System.getenv("DATA_SOURCE_SERVICENAME"); //eg %PDB_NAME%_tp + public static String TNS_ADMIN = System.getenv("TNS_ADMIN"); //eg /msdataworkshop/creds - static { + public static String OCI_REGION = System.getenv("OCI_REGION"); //eg us-ashburn-1 + public static String VAULT_SECRET_OCID = System.getenv("VAULT_SECRET_OCID"); //eg ocid.... + public static String OCI_CONFIG_FILE = System.getenv("OCI_CONFIG_FILE"); //eg "~/.oci/config" + public static String OCI_PROFILE = System.getenv("OCI_PROFILE"); //eg "DEFAULT" + //MULTI_DATASOURCE_CONFIG related.... 
+    public static String MULTI_DATASOURCE_CONFIG = System.getenv("MULTI_DATASOURCE_CONFIG");
+    public static final String SERVICE_NAME_STRING = "serviceName";
+    public static final String USER_NAME_STRING = "userName";
+    public static final String PASSWORD_STRING = "password";
+    public static final String TNS_ADMIN_STRING = "TNS_ADMIN";
+    public static final String PASSWORD_OCID_STRING = "passwordOCID";
+    public static final String OCI_CONFIG_FILE_STRING = "ociConfigFile";
+    public static final String OCI_REGION_STRING = "ociRegion";
+    public static final String OCI_PROFILE_STRING = "ociProfile";
+
+    static { // not really necessary but gives information that a global datasource is in use
         if (DATA_SOURCE_USER != null && DATA_SOURCE_PASSWORD != null && DATA_SOURCE_SERVICENAME != null) {
             DATA_SOURCE_NAME = DATA_SOURCE_USER + "/" + DATA_SOURCE_PASSWORD + "@" + DATA_SOURCE_SERVICENAME;
             LOGGER.info("DATA_SOURCE_NAME = DATA_SOURCE_USER + \"/\" + DATA_SOURCE_PASSWORD + \"@\" + DATA_SOURCE_SERVICENAME"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp
         }
     }
 
-    PoolDataSource observabilityDB;
+    PoolDataSource globalObservabilityDB;
+
+    //This map is used for multi-datasource scraping, both when using dsn target string and config
     Map dataSourceNameToDataSourceMap = new HashMap<>();
+    //This map is used for multi-datasource scraping when using config only
+    public static Map dataSourceNameToDataSourceConfigMap = new HashMap<>();
+
+    //used by logs and tracing exporters as they do not currently support multi-datasource config
     public PoolDataSource getPoolDataSource() throws SQLException {
-        return getPoolDataSource(DATA_SOURCE_NAME);
+        return getPoolDataSource(DATA_SOURCE_NAME, false);
     }
-    public PoolDataSource getPoolDataSource(String dataSourceName) throws SQLException {
-        if (dataSourceName.equals(DATA_SOURCE_NAME)) {
-            if (observabilityDB != null) return observabilityDB;
-            return observabilityDB = getDataSource(DATA_SOURCE_NAME);
+
+    public PoolDataSource getPoolDataSource(String dataSourceName, boolean isScrapeByName) throws SQLException {
+        if (DATA_SOURCE_NAME != null && dataSourceName.equals(DATA_SOURCE_NAME)) {
+            if (globalObservabilityDB != null) return globalObservabilityDB;
+            return globalObservabilityDB = getDataSource(DATA_SOURCE_NAME);
         } else {
             if(dataSourceNameToDataSourceMap.containsKey(dataSourceName) && dataSourceNameToDataSourceMap.get(dataSourceName) != null)
                 return dataSourceNameToDataSourceMap.get(dataSourceName);
-            PoolDataSource poolDataSource = getDataSource(dataSourceName);
+
+            LOGGER.debug("putting dataSourceName:" + dataSourceName + " isScrapeByName:" + isScrapeByName +
+                    " configEntryPresent:" +
+                    (ObservabilityExporter.dataSourceNameToDataSourceConfigMap.get(dataSourceName) != null)); //do not log the config entry itself as it contains the password
+            PoolDataSource poolDataSource = isScrapeByName?
+                    getDataSource(ObservabilityExporter.dataSourceNameToDataSourceConfigMap.get(dataSourceName))
+                    : getDataSource(dataSourceName);
             dataSourceNameToDataSourceMap.put(dataSourceName, poolDataSource);
             return poolDataSource;
         }
     }
 
     private PoolDataSource getDataSource(String dataSourceName) throws SQLException {
-        PoolDataSource poolDataSource = PoolDataSourceFactory.getPoolDataSource();
-        poolDataSource.setConnectionFactoryClassName("oracle.jdbc.pool.OracleDataSource");
         String user = dataSourceName.substring(0, dataSourceName.indexOf("/"));
         String pw = dataSourceName.substring(dataSourceName.indexOf("/") + 1, dataSourceName.indexOf("@"));
         String serviceName = dataSourceName.substring(dataSourceName.indexOf("@") + 1);
-        String url = "jdbc:oracle:thin:@" + serviceName + "?TNS_ADMIN=" + TNS_ADMIN;
+        return getPoolDataSource(dataSourceName, user, pw, serviceName, TNS_ADMIN,
+                VAULT_SECRET_OCID, OCI_CONFIG_FILE, OCI_PROFILE, OCI_REGION, false);
+    }
+    private PoolDataSource getDataSource(DataSourceConfig dataSourceConfig) throws SQLException {
+        return getPoolDataSource(dataSourceConfig.getDataSourceName(),
+                dataSourceConfig.getUserName(),
+                dataSourceConfig.getPassword(),
+                dataSourceConfig.getServiceName(),
+                dataSourceConfig.getTNS_ADMIN(),
+                dataSourceConfig.getPasswordOCID(),
+                dataSourceConfig.getOciConfigFile(),
+                dataSourceConfig.getOciProfile(),
+                dataSourceConfig.getOciRegion(),
+                true);
+    }
+
+    private PoolDataSource getPoolDataSource(
+            String dataSourceName, String user, String pw, String serviceName, String tnsAdmin,
+            String vaultSecretOcid, String ociConfigFile, String ociProfile, String ociRegion, boolean isScrapeByName) throws SQLException {
+        LOGGER.debug("getPoolDataSource dataSourceName = " + dataSourceName + ", user = " + user + ", serviceName = " + serviceName + ", vaultSecretOcid = " + vaultSecretOcid + ", ociConfigFile = " + ociConfigFile + ", ociProfile = " + ociProfile + ", ociRegion = " + ociRegion + ", isScrapeByName = " + isScrapeByName); //do not log pw
         PoolDataSource poolDataSource = PoolDataSourceFactory.getPoolDataSource();
         poolDataSource.setConnectionFactoryClassName("oracle.jdbc.pool.OracleDataSource");
         String url = "jdbc:oracle:thin:@" + serviceName + "?TNS_ADMIN=" + tnsAdmin;
         poolDataSource.setURL(url);
         poolDataSource.setUser(user);
-        if (VAULT_SECRET_OCID == null || VAULT_SECRET_OCID.trim().equals("") || !dataSourceName.equals(DATA_SOURCE_NAME)) {
+        if (vaultSecretOcid == null || vaultSecretOcid.trim().equals("") ||
+                //vault is not supported with scrape by dsn target currently, only with scrape by datasource name and global datasource
+                (!isScrapeByName && !dataSourceName.equals(DATA_SOURCE_NAME))) {
             poolDataSource.setPassword(pw);
         } else {
             try {
-                poolDataSource.setPassword(getPasswordFromVault());
+                poolDataSource.setPassword(getPasswordFromVault(vaultSecretOcid, ociConfigFile, ociProfile, ociRegion));
             } catch (IOException e) {
                 throw new SQLException(e);
             }
@@ -89,18 +136,18 @@ private PoolDataSource getDataSource(String dataSourceName) throws SQLException
     }
 
-    public String getPasswordFromVault() throws IOException {
+    public String getPasswordFromVault(String vaultSecretOcid, String ociConfigFile, String ociProfile, String ociRegion) throws IOException {
         SecretsClient secretsClient;
-        if (OCI_CONFIG_FILE == null || OCI_CONFIG_FILE.trim().equals("")) {
+        if (ociConfigFile == null || ociConfigFile.trim().equals("")) {
             secretsClient = new SecretsClient(InstancePrincipalsAuthenticationDetailsProvider.builder().build());
         } else {
-            String profile =
OCI_PROFILE==null || OCI_PROFILE.trim().equals("") ? "DEFAULT": OCI_PROFILE; - secretsClient = new SecretsClient(new ConfigFileAuthenticationDetailsProvider(OCI_CONFIG_FILE, profile)); + String profile = ociProfile ==null || ociProfile.trim().equals("") ? "DEFAULT": ociProfile; + secretsClient = new SecretsClient(new ConfigFileAuthenticationDetailsProvider(ociConfigFile, profile)); } - secretsClient.setRegion(OCI_REGION); + secretsClient.setRegion(ociRegion); GetSecretBundleRequest getSecretBundleRequest = GetSecretBundleRequest .builder() - .secretId(VAULT_SECRET_OCID) + .secretId(vaultSecretOcid ) .stage(GetSecretBundleRequest.Stage.Current) .build(); GetSecretBundleResponse getSecretBundleResponse = secretsClient.getSecretBundle(getSecretBundleRequest); diff --git a/src/main/java/oracle/observability/logs/LogsExporter.java b/src/main/java/oracle/observability/logs/LogsExporter.java index 2b8890c..f82f642 100644 --- a/src/main/java/oracle/observability/logs/LogsExporter.java +++ b/src/main/java/oracle/observability/logs/LogsExporter.java @@ -43,6 +43,7 @@ public void run() { LOGGER.debug("LogsExporter default metrics from:" + DEFAULT_METRICS); if(LOG_INTERVAL!=null && !LOG_INTERVAL.trim().equals("")) logInterval = Integer.getInteger(LOG_INTERVAL); LOGGER.debug("LogsExporter logInterval:" + logInterval); + //todo move to common/ObservabilityExporter location and log something friendly if it does not exist and exit, ie fast fail startup File tomlfile = new File(DEFAULT_METRICS); TomlMapper mapper = new TomlMapper(); JsonNode jsonNode = mapper.readerFor(LogsExporterConfigEntry.class).readTree(new FileInputStream(tomlfile)); diff --git a/src/main/java/oracle/observability/metrics/CollectorRegistryWithGaugeMap.java b/src/main/java/oracle/observability/metrics/CollectorRegistryWithGaugeMap.java new file mode 100644 index 0000000..b928a63 --- /dev/null +++ b/src/main/java/oracle/observability/metrics/CollectorRegistryWithGaugeMap.java @@ -0,0 +1,12 @@ +package oracle.observability.metrics; + +import io.prometheus.client.CollectorRegistry; +import io.prometheus.client.Gauge; + +import java.util.HashMap; +import java.util.Map; + +public class CollectorRegistryWithGaugeMap extends CollectorRegistry { + Map gaugeMap = new HashMap<>(); + +} diff --git a/src/main/java/oracle/observability/metrics/MetricsExporter.java b/src/main/java/oracle/observability/metrics/MetricsExporter.java index 967273b..6744ba6 100644 --- a/src/main/java/oracle/observability/metrics/MetricsExporter.java +++ b/src/main/java/oracle/observability/metrics/MetricsExporter.java @@ -5,6 +5,7 @@ import io.prometheus.client.Collector; import io.prometheus.client.CollectorRegistry; import io.prometheus.client.Gauge; +import oracle.observability.DataSourceConfig; import oracle.observability.ObservabilityExporter; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.RequestParam; @@ -12,11 +13,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.Yaml; import javax.annotation.PostConstruct; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; +import java.io.*; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; @@ -33,58 +33,106 @@ public class MetricsExporter extends ObservabilityExporter { public static final String IGNOREZERORESULT = "ignorezeroresult"; public static final String FALSE = "false"; public static final String ORACLEDB_METRIC_PREFIX = "oracledb_"; - Map gaugeMap = new 
HashMap<>();
-    Map dnsToCollectorRegistryMap = new HashMap<>();
-
+    //This map is used for multi-datasource scraping, both when using dsn target string and config
+    Map dnsToCollectorRegistryMap = new HashMap<>();
+    CollectorRegistryWithGaugeMap defaultRegistry = new CollectorRegistryWithGaugeMap();
 
     /**
      * The endpoint that prometheus will scrape
+     *
      * @return Prometheus metric
      */
     @GetMapping(value = "/metrics", produces = "text/plain")
     public String metrics() throws Exception {
-        processMetrics(DATA_SOURCE_NAME, CollectorRegistry.defaultRegistry);
-        return getMetricsString(CollectorRegistry.defaultRegistry);
+        processMetrics(DATA_SOURCE_NAME, defaultRegistry, false);
+        return getMetricsString(defaultRegistry);
     }
+
     @GetMapping(value = "/scrape", produces = "text/plain")
     public String scrape(@RequestParam("target") String target) throws Exception {
-        CollectorRegistry collectorRegistry = dnsToCollectorRegistryMap.get(target);
+        CollectorRegistryWithGaugeMap collectorRegistry = dnsToCollectorRegistryMap.get(target);
         if (collectorRegistry == null) {
-            collectorRegistry = new CollectorRegistry();
+            collectorRegistry = new CollectorRegistryWithGaugeMap();
             dnsToCollectorRegistryMap.put(target, collectorRegistry);
         }
-        processMetrics(target, dnsToCollectorRegistryMap.get(target));
+        processMetrics(target, dnsToCollectorRegistryMap.get(target), false);
+        return getMetricsString(collectorRegistry);
+    }
+
+    @GetMapping(value = "/scrapeByName", produces = "text/plain")
+    public String scrapeByConfigName(@RequestParam("name") String name) throws Exception {
+        CollectorRegistryWithGaugeMap collectorRegistry = dnsToCollectorRegistryMap.get(name);
+        if (collectorRegistry == null) {
+            collectorRegistry = new CollectorRegistryWithGaugeMap();
+            dnsToCollectorRegistryMap.put(name, collectorRegistry);
+        }
+        processMetrics(name, dnsToCollectorRegistryMap.get(name), true);
         return getMetricsString(collectorRegistry);
     }
 
     @PostConstruct
     public void init() throws Exception {
-        processMetrics(DATA_SOURCE_NAME, CollectorRegistry.defaultRegistry);
+        boolean isGlobalDataSourceSpecified = DATA_SOURCE_NAME != null && !DATA_SOURCE_NAME.trim().equals("");
+        boolean isMultiDataSourceConfigSpecified = MULTI_DATASOURCE_CONFIG != null && !MULTI_DATASOURCE_CONFIG.trim().equals("");
+        if (!isMultiDataSourceConfigSpecified && !isGlobalDataSourceSpecified)
+            throw new Exception(
+                    "Neither global datasource (DATA_SOURCE_NAME) nor multi-datasource (MULTI_DATASOURCE_CONFIG) " +
+                            "specified. One or both are required.");
One or both are required."); + if (isMultiDataSourceConfigSpecified) parseMultiDataSourceConfig(); + if (isGlobalDataSourceSpecified) processMetrics(DATA_SOURCE_NAME, defaultRegistry, false); } - private void processMetrics(String datasourceName, CollectorRegistry registry) throws IOException, SQLException { + //Currently this is only supported for metrics and so is called from here + //If/when it is supported by other exporters it should be moved to common/Observability exporter + //Failure to find file, if specified, results in exit + public void parseMultiDataSourceConfig() throws FileNotFoundException { + File file = new File(MULTI_DATASOURCE_CONFIG); + InputStream inputStream = new FileInputStream(file); + Yaml yaml = new Yaml(); + Map> config = yaml.load(inputStream); + for (Map.Entry> entry : config.entrySet()) { + DataSourceConfig dataSourceConfigForMap = new DataSourceConfig(); + String dataSourceName = entry.getKey(); + Map dataSourceConfig = entry.getValue(); + dataSourceConfigForMap.setDataSourceName(dataSourceName); //the key is also part of the config for convenience + dataSourceConfigForMap.setServiceName(dataSourceConfig.get(SERVICE_NAME_STRING)); + dataSourceConfigForMap.setUserName(dataSourceConfig.get(USER_NAME_STRING)); + dataSourceConfigForMap.setPassword(dataSourceConfig.get(PASSWORD_STRING)); + dataSourceConfigForMap.setTNS_ADMIN(dataSourceConfig.get(TNS_ADMIN_STRING)); + dataSourceConfigForMap.setPasswordOCID(dataSourceConfig.get(PASSWORD_OCID_STRING)); + dataSourceConfigForMap.setOciRegion(dataSourceConfig.get(OCI_CONFIG_FILE_STRING)); + dataSourceConfigForMap.setOciRegion(dataSourceConfig.get(OCI_REGION_STRING)); + dataSourceConfigForMap.setOciProfile(dataSourceConfig.get(OCI_PROFILE_STRING)); + LOGGER.info("adding dataSource from config:" + dataSourceName); + dataSourceNameToDataSourceConfigMap.put(dataSourceName, dataSourceConfigForMap); + } + } + + private void processMetrics(String datasourceName, CollectorRegistryWithGaugeMap registry, boolean isScrapeByName) throws IOException, SQLException { + if (DEFAULT_METRICS == null || DEFAULT_METRICS.trim().equals("")) + throw new FileNotFoundException("DEFAULT_METRICS file location must be specified"); File tomlfile = new File(DEFAULT_METRICS); TomlMapper mapper = new TomlMapper(); JsonNode jsonNode = mapper.readerFor(MetricsExporterConfigEntry.class).readTree(new FileInputStream(tomlfile)); JsonNode metric = jsonNode.get("metric"); - if(metric == null || metric.isEmpty()) { + if (metric == null || metric.isEmpty()) { LOGGER.info("No logs records configured"); return; } Iterator metrics = metric.iterator(); int isConnectionSuccessful = 0; - try(Connection connection = getPoolDataSource(datasourceName).getConnection()) { + try (Connection connection = getPoolDataSource(datasourceName, isScrapeByName).getConnection()) { isConnectionSuccessful = 1; while (metrics.hasNext()) { processMetric(registry, connection, metrics); } - } finally { - Gauge gauge = gaugeMap.get(ORACLEDB_METRIC_PREFIX + UP); + } finally { //always set the db health/up metric - if a connection is unobtainable the metric is set to down + Gauge gauge = registry.gaugeMap.get(ORACLEDB_METRIC_PREFIX + UP); if (gauge == null) { Gauge upgauge = Gauge.build().name(ORACLEDB_METRIC_PREFIX + UP).help("Whether the Oracle database server is up.").register(registry); upgauge.set(isConnectionSuccessful); - gaugeMap.put(ORACLEDB_METRIC_PREFIX + UP, upgauge); + registry.gaugeMap.put(ORACLEDB_METRIC_PREFIX + UP, upgauge); } else gauge.set(isConnectionSuccessful); } } 
@@ -100,15 +148,15 @@ private void processMetrics(String datasourceName, CollectorRegistry registry) t * Request string * IgnoreZeroResult bool */ - private void processMetric(CollectorRegistry registry, Connection connection, Iterator metric) { + private void processMetric(CollectorRegistryWithGaugeMap registry, Connection connection, Iterator metric) { JsonNode next = metric.next(); String context = next.get(CONTEXT).asText(); // eg context = "teq" - String metricsType = next.get(METRICSTYPE) == null ? "" :next.get(METRICSTYPE).asText(); + String metricsType = next.get(METRICSTYPE) == null ? "" : next.get(METRICSTYPE).asText(); JsonNode metricsdescNode = next.get(METRICSDESC); // eg metricsdesc = { enqueued_msgs = "Total enqueued messages.", dequeued_msgs = "Total dequeued messages.", remained_msgs = "Total remained messages."} Iterator> metricsdescIterator = metricsdescNode.fields(); Map metricsDescMap = new HashMap<>(); - while(metricsdescIterator.hasNext()) { + while (metricsdescIterator.hasNext()) { Map.Entry metricsdesc = metricsdescIterator.next(); metricsDescMap.put(metricsdesc.getKey(), metricsdesc.getValue().asText()); } @@ -127,75 +175,84 @@ private void processMetric(CollectorRegistry registry, Connection connection, It String ignorezeroresult = next.get(IGNOREZERORESULT) == null ? FALSE : next.get(IGNOREZERORESULT).asText(); //todo, currently defaults to true ResultSet resultSet; try { - resultSet = connection.prepareStatement(request).executeQuery(); - while (resultSet.next()) { - translateQueryToPrometheusMetric(registry, context, metricsDescMap, labelNames, resultSet); - } - } catch(SQLException e) { //this can be due to table not existing etc. + resultSet = connection.prepareStatement(request).executeQuery(); + while (resultSet.next()) { + translateQueryToPrometheusMetric(registry, context, metricsDescMap, labelNames, resultSet); + } + } catch (SQLException e) { //this can be due to table not existing etc. 
LOGGER.warn("MetricsExporter.processMetric during:" + request + " exception:" + e); - return; } } - private void translateQueryToPrometheusMetric(CollectorRegistry registry, String context, Map metricsDescMap, + private void translateQueryToPrometheusMetric(CollectorRegistryWithGaugeMap registry, String context, Map metricsDescMap, String[] labelNames, ResultSet resultSet) throws SQLException { String[] labelValues = new String[labelNames.length]; - Map sqlQueryResults = + Map sqlQueryResults = extractGaugesAndLabelValues(registry, context, metricsDescMap, labelNames, resultSet, labelValues, resultSet.getMetaData().getColumnCount()); - setLabelValues(context, labelNames, labelValues, sqlQueryResults.entrySet().iterator()); + if(sqlQueryResults == null || sqlQueryResults.entrySet() == null || sqlQueryResults.entrySet().isEmpty()) { + LOGGER.error("Description for column is missing"); + } + setLabelValues(registry, context, labelNames, labelValues, sqlQueryResults.entrySet().iterator()); } /** * Creates Gauges and gets label values */ - private Map extractGaugesAndLabelValues(CollectorRegistry registry, - String context, Map metricsDescMap, String[] labelNames, ResultSet resultSet, - String[] labelValues, int columnCount) throws SQLException { - Map sqlQueryResults = new HashMap<>(); + private Map extractGaugesAndLabelValues(CollectorRegistryWithGaugeMap registry, + String context, Map metricsDescMap, String[] labelNames, ResultSet resultSet, + String[] labelValues, int columnCount) throws SQLException { + Map sqlQueryResults = new HashMap<>(); String columnName; String columnTypeName; for (int i = 0; i < columnCount; i++) { //for each column... columnName = resultSet.getMetaData().getColumnName(i + 1).toLowerCase(); columnTypeName = resultSet.getMetaData().getColumnTypeName(i + 1); - if (columnTypeName.equals("VARCHAR2")) //. typename is 2/NUMBER or 12/VARCHAR2 - ; - else + if (columnTypeName.equals("VARCHAR2") || columnTypeName.equals("CHAR")) //. 
typename is 2/NUMBER or 12/VARCHAR2
+                sqlQueryResults.put(resultSet.getMetaData().getColumnName(i + 1), resultSet.getString(i + 1));
+            else {
+                LOGGER.debug("columnTypeName:" + columnTypeName);
                 sqlQueryResults.put(resultSet.getMetaData().getColumnName(i + 1), resultSet.getLong(i + 1));
+            }
             String gaugeName = ORACLEDB_METRIC_PREFIX + context + "_" + columnName;
             LOGGER.debug("---gaugeName:" + gaugeName);
-            Gauge gauge = gaugeMap.get(gaugeName);
+            Gauge gauge = registry.gaugeMap.get(gaugeName);
             if (gauge == null) {
-                if(metricsDescMap.containsKey(columnName)) {
+                if (metricsDescMap.containsKey(columnName)) {
                     if (labelNames.length > 0) {
                         gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).labelNames(labelNames).register(registry);
-                    } else gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).register(registry);
-                    gaugeMap.put(gaugeName, gauge);
+                    } else
+                        gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).register(registry);
+                    registry.gaugeMap.put(gaugeName, gauge);
                 }
             }
-            for (int ii = 0; ii< labelNames.length; ii++) {
-                if(labelNames[ii].equals(columnName)) labelValues[ii] = resultSet.getString(i+1);
+            for (int ii = 0; ii < labelNames.length; ii++) {
+                if (labelNames[ii].equals(columnName)) labelValues[ii] = resultSet.getString(i + 1);
             }
         }
         return sqlQueryResults;
     }
 
-    private void setLabelValues(String context, String[] labelNames, String[] labelValues, Iterator> sqlQueryRestulsEntryIterator) {
-        while(sqlQueryRestulsEntryIterator.hasNext()) { //for each column
-            Map.Entry sqlQueryResultsEntry = sqlQueryRestulsEntryIterator.next();
+    private void setLabelValues(CollectorRegistryWithGaugeMap registry, String context, String[] labelNames, String[] labelValues,
+                                Iterator> sqlQueryResultsEntryIterator) {
+        while (sqlQueryResultsEntryIterator.hasNext()) { //for each column
+            Map.Entry sqlQueryResultsEntry = sqlQueryResultsEntryIterator.next();
             boolean isLabel = false;
-            for (int ii = 0; ii< labelNames.length; ii++) {
-                if(labelNames[ii].equals(sqlQueryResultsEntry.getKey())) isLabel =true; // continue
+            for (String labelName : labelNames) {
+                if (labelName.equals(sqlQueryResultsEntry.getKey())) isLabel = true; // continue
             }
-            if(!isLabel) {
-                int valueToSet = (int) Math.rint(sqlQueryResultsEntry.getValue().intValue());
-                if(labelValues.length >0 )
+            if (!isLabel) {
+                Object valueToSet = sqlQueryResultsEntry.getValue();
+                if (labelValues.length > 0)
                     try {
-                        gaugeMap.get(ORACLEDB_METRIC_PREFIX + context + "_" + sqlQueryResultsEntry.getKey().toLowerCase()).labels(labelValues).set(valueToSet);
+                        //numeric query results arrive as Long (or other Number types), so test for Number rather than Integer
+                        if (valueToSet instanceof Number) registry.gaugeMap.get(ORACLEDB_METRIC_PREFIX + context + "_" +
+                                sqlQueryResultsEntry.getKey().toLowerCase()).labels(labelValues).set(((Number) valueToSet).doubleValue());
                     } catch (Exception ex) { //todo filter to avoid unnecessary exception handling
                         LOGGER.debug("OracleDBMetricsExporter.translateQueryToPrometheusMetric Exc:" + ex);
                     }
-                else gaugeMap.get(ORACLEDB_METRIC_PREFIX + context + "_" + sqlQueryResultsEntry.getKey().toLowerCase()).set(valueToSet);
+                else if (valueToSet instanceof Number)
+                    registry.gaugeMap.get(ORACLEDB_METRIC_PREFIX + context + "_" +
+                            sqlQueryResultsEntry.getKey().toLowerCase()).set(((Number) valueToSet).doubleValue());
             }
         }
     }
@@ -221,7 +278,7 @@ private static String compose(Enumeration mfs) {
             .append(typeString(metricFamilySamples.type))
             .append('\n');
 
-        for (Collector.MetricFamilySamples.Sample sample: metricFamilySamples.samples) {
DEBUG ") +// .append("metricFamilySamples.samples.size()") +// .append(' ') +// .append(metricFamilySamples.samples.size()) +// .append('\n'); + + for (Collector.MetricFamilySamples.Sample sample : metricFamilySamples.samples) { result.append(sample.name); if (!sample.labelNames.isEmpty()) { result.append('{'); diff --git a/src/main/java/oracle/observability/tracing/TracingExporter.java b/src/main/java/oracle/observability/tracing/TracingExporter.java index c943ef2..f6159ad 100644 --- a/src/main/java/oracle/observability/tracing/TracingExporter.java +++ b/src/main/java/oracle/observability/tracing/TracingExporter.java @@ -86,6 +86,7 @@ public void run() { traceInterval = Integer.getInteger(TRACE_INTERVAL); else traceInterval = traceIntervalDefault; LOGGER.debug("TracingExporter traceInterval:" + traceInterval); + //todo move to common/ObservabilityExporter location and log something friendly if it does not exist and exit, ie fast fail startup File tomlfile = new File(DEFAULT_METRICS); TomlMapper mapper = new TomlMapper(); JsonNode jsonNode; diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties deleted file mode 100644 index cfbf3a1..0000000 --- a/src/main/resources/application.properties +++ /dev/null @@ -1 +0,0 @@ -server.port=9161 diff --git a/src/main/resources/application.yaml b/src/main/resources/application.yaml new file mode 100644 index 0000000..0e1f425 --- /dev/null +++ b/src/main/resources/application.yaml @@ -0,0 +1,2 @@ +server: + port : 9161 \ No newline at end of file